diff options
author | Kent Overstreet <kent.overstreet@linux.dev> | 2023-08-07 12:04:05 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:10:10 -0400 |
commit | 1e81f89b020758fb424f8bb0f13405706d29dfc7 (patch) | |
tree | 7ae51550659bf2f5e19595ebdb377d1343001382 | |
parent | 6fe893eade864665c0956a2ac2eff78b86dc8145 (diff) |
bcachefs: Fix assorted checkpatch nits
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r-- | fs/bcachefs/alloc_types.h | 4 | ||||
-rw-r--r-- | fs/bcachefs/btree_gc.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/btree_io.h | 4 | ||||
-rw-r--r-- | fs/bcachefs/btree_iter.c | 8 | ||||
-rw-r--r-- | fs/bcachefs/btree_update.h | 4 | ||||
-rw-r--r-- | fs/bcachefs/buckets.c | 1 | ||||
-rw-r--r-- | fs/bcachefs/chardev.h | 2 | ||||
-rw-r--r-- | fs/bcachefs/checksum.c | 12 | ||||
-rw-r--r-- | fs/bcachefs/compress.c | 3 | ||||
-rw-r--r-- | fs/bcachefs/extents.c | 12 | ||||
-rw-r--r-- | fs/bcachefs/fs-io.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/io.c | 1 | ||||
-rw-r--r-- | fs/bcachefs/io.h | 2 | ||||
-rw-r--r-- | fs/bcachefs/journal.c | 4 | ||||
-rw-r--r-- | fs/bcachefs/journal_io.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/journal_reclaim.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/recovery.c | 4 | ||||
-rw-r--r-- | fs/bcachefs/super-io.c | 4 | ||||
-rw-r--r-- | fs/bcachefs/super-io.h | 1 | ||||
-rw-r--r-- | fs/bcachefs/util.c | 11 | ||||
-rw-r--r-- | fs/bcachefs/util.h | 8 | ||||
-rw-r--r-- | fs/bcachefs/varint.c | 1 |
22 files changed, 55 insertions(+), 39 deletions(-)
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h index 804a843f23c1..b91b7a461056 100644 --- a/fs/bcachefs/alloc_types.h +++ b/fs/bcachefs/alloc_types.h @@ -105,7 +105,7 @@ struct write_point { struct dev_stripe_state stripe; u64 sectors_allocated; - } __attribute__((__aligned__(SMP_CACHE_BYTES))); + } __aligned(SMP_CACHE_BYTES); struct { struct work_struct index_update_work; @@ -116,7 +116,7 @@ struct write_point { enum write_point_state state; u64 last_state_change; u64 time[WRITE_POINT_STATE_NR]; - } __attribute__((__aligned__(SMP_CACHE_BYTES))); + } __aligned(SMP_CACHE_BYTES); }; struct write_point_specifier { diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 3f5b4d8ee4a5..1a749d4be5b9 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -535,7 +535,7 @@ int bch2_check_topology(struct bch_fs *c) bch2_trans_init(&trans, c, 0, 0); - for (i = 0; i < btree_id_nr_alive(c)&& !ret; i++) { + for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) { struct btree_root *r = bch2_btree_id_root(c, i); if (!r->alive) diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h index 0cadf651e7cf..cd99bbb00a5a 100644 --- a/fs/bcachefs/btree_io.h +++ b/fs/bcachefs/btree_io.h @@ -143,8 +143,8 @@ enum btree_write_flags { __BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS, __BTREE_WRITE_ALREADY_STARTED, }; -#define BTREE_WRITE_ONLY_IF_NEED (1U << __BTREE_WRITE_ONLY_IF_NEED ) -#define BTREE_WRITE_ALREADY_STARTED (1U << __BTREE_WRITE_ALREADY_STARTED) +#define BTREE_WRITE_ONLY_IF_NEED BIT(__BTREE_WRITE_ONLY_IF_NEED) +#define BTREE_WRITE_ALREADY_STARTED BIT(__BTREE_WRITE_ALREADY_STARTED) void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned); void bch2_btree_node_write(struct bch_fs *, struct btree *, diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index ecc123b2d1b3..feb23e9c2a1a 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -1008,7 +1008,7 @@ retry_all: /* * We used to assert 
that all paths had been traversed here * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since - * path->Should_be_locked is not set yet, we we might have unlocked and + * path->should_be_locked is not set yet, we might have unlocked and * then failed to relock a path - that's fine. */ err: @@ -2738,9 +2738,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, unsigned depth, unsigned flags) { - flags |= BTREE_ITER_NOT_EXTENTS; - flags |= __BTREE_ITER_ALL_SNAPSHOTS; - flags |= BTREE_ITER_ALL_SNAPSHOTS; + flags |= BTREE_ITER_NOT_EXTENTS; + flags |= __BTREE_ITER_ALL_SNAPSHOTS; + flags |= BTREE_ITER_ALL_SNAPSHOTS; bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth, __bch2_btree_iter_flags(trans, btree_id, flags), diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 2281140a288c..901c42b57c35 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -268,10 +268,10 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr { struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags|BTREE_ITER_INTENT, type); - struct bkey_i *ret = unlikely(IS_ERR(k.k)) + struct bkey_i *ret = IS_ERR(k.k) ? 
ERR_CAST(k.k) : __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes); - if (unlikely(IS_ERR(ret))) + if (IS_ERR(ret)) bch2_trans_iter_exit(trans, iter); return ret; } diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 7bb7f0caee45..c02c8c917a29 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -1924,6 +1924,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca) { int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca)); + if (ret) bch_err_fn(c, ret); return ret; diff --git a/fs/bcachefs/chardev.h b/fs/bcachefs/chardev.h index 3a4890d39ff9..0f563ca53c36 100644 --- a/fs/bcachefs/chardev.h +++ b/fs/bcachefs/chardev.h @@ -17,7 +17,7 @@ int __init bch2_chardev_init(void); static inline long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user * arg) { - return -ENOSYS; + return -ENOTTY; } static inline void bch2_fs_chardev_exit(struct bch_fs *c) {} diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c index bf03d42c6138..76cf2e70f019 100644 --- a/fs/bcachefs/checksum.c +++ b/fs/bcachefs/checksum.c @@ -265,9 +265,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type, #ifdef CONFIG_HIGHMEM __bio_for_each_segment(bv, bio, *iter, *iter) { - void *p = kmap_atomic(bv.bv_page) + bv.bv_offset; + void *p = kmap_local_page(bv.bv_page) + bv.bv_offset; + bch2_checksum_update(&state, p, bv.bv_len); - kunmap_atomic(p); + kunmap_local(p); } #else __bio_for_each_bvec(bv, bio, *iter, *iter) @@ -287,10 +288,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type, #ifdef CONFIG_HIGHMEM __bio_for_each_segment(bv, bio, *iter, *iter) { - void *p = kmap_atomic(bv.bv_page) + bv.bv_offset; + void *p = kmap_local_page(bv.bv_page) + bv.bv_offset; crypto_shash_update(desc, p, bv.bv_len); - kunmap_atomic(p); + kunmap_local(p); } #else __bio_for_each_bvec(bv, bio, *iter, *iter) @@ -427,8 +428,9 @@ int 
bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio, extent_nonce(version, crc_old), bio); if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) { - bch_err(c, "checksum error in bch2_rechecksum_bio() (memory corruption or bug?)\n" + bch_err(c, "checksum error in %s() (memory corruption or bug?)\n" "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)", + __func__, crc_old.csum.hi, crc_old.csum.lo, merged.hi, diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c index c9ca7cce55f8..6b17f7cc5860 100644 --- a/fs/bcachefs/compress.c +++ b/fs/bcachefs/compress.c @@ -643,7 +643,8 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) static u64 compression_opt_to_feature(unsigned v) { unsigned type = bch2_compression_decode(v).type; - return 1ULL << bch2_compression_opt_to_feature[type]; + + return BIT_ULL(bch2_compression_opt_to_feature[type]); } int bch2_fs_compress_init(struct bch_fs *c) diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 7a3f42f3bc5b..d7f74db4c83e 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -517,7 +517,7 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst, switch (type) { case BCH_EXTENT_ENTRY_crc32: set_common_fields(dst->crc32, src); - dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo); + dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo); break; case BCH_EXTENT_ENTRY_crc64: set_common_fields(dst->crc64, src); @@ -915,11 +915,11 @@ bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2) bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1) bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) - if (p1.ptr.dev == p2.ptr.dev && - p1.ptr.gen == p2.ptr.gen && - (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == - (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) - return true; + if (p1.ptr.dev == p2.ptr.dev && + p1.ptr.gen == p2.ptr.gen && + (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == + (s64) 
p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) + return true; return false; } else { diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 917ad1c8f46d..40bfd0b25d9d 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -2867,7 +2867,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode, folio = __filemap_get_folio(mapping, index, FGP_LOCK|FGP_CREAT, GFP_KERNEL); - if (unlikely(IS_ERR_OR_NULL(folio))) { + if (IS_ERR_OR_NULL(folio)) { ret = -ENOMEM; goto out; } diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c index 5bacc6a9dd8f..f42d9da2e16e 100644 --- a/fs/bcachefs/io.c +++ b/fs/bcachefs/io.c @@ -2435,6 +2435,7 @@ static void __bch2_read_endio(struct work_struct *work) if (rbio->bounce) { struct bvec_iter src_iter = src->bi_iter; + bio_copy_data_iter(dst, &dst_iter, src, &src_iter); } } diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h index 1476380d5fbf..831e3f1b7e41 100644 --- a/fs/bcachefs/io.h +++ b/fs/bcachefs/io.h @@ -52,7 +52,7 @@ enum __bch_write_flags { }; enum bch_write_flags { -#define x(f) BCH_WRITE_##f = 1U << __BCH_WRITE_##f, +#define x(f) BCH_WRITE_##f = BIT(__BCH_WRITE_##f), BCH_WRITE_FLAGS() #undef x }; diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 80a612c0577f..055920c26da6 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -63,6 +63,7 @@ journal_seq_to_buf(struct journal *j, u64 seq) static void journal_pin_list_init(struct journal_entry_pin_list *p, int count) { unsigned i; + for (i = 0; i < ARRAY_SIZE(p->list); i++) INIT_LIST_HEAD(&p->list[i]); INIT_LIST_HEAD(&p->flushed); @@ -514,8 +515,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, int ret; closure_wait_event(&j->async_wait, - (ret = __journal_res_get(j, res, flags)) != - -BCH_ERR_journal_res_get_blocked|| + (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked || (flags & JOURNAL_RES_GET_NONBLOCK)); return ret; } diff --git a/fs/bcachefs/journal_io.c 
b/fs/bcachefs/journal_io.c index 798d4c8c7200..42c9700e6d26 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -1053,6 +1053,7 @@ found: bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr); for (i = 0; i < 3; i++) { unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr; + bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]); } ja->sectors_free = 0; @@ -1629,7 +1630,6 @@ static void do_journal_write(struct closure *cl) } continue_at(cl, journal_write_done, c->io_complete_wq); - return; } static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset) diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index 8de83e103751..9a2a534915dd 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -345,7 +345,7 @@ static inline bool __journal_pin_drop(struct journal *j, list_del_init(&pin->list); /* - * Unpinning a journal entry make make journal_next_bucket() succeed, if + * Unpinning a journal entry may make journal_next_bucket() succeed, if * writing a new last_seq will now make another bucket available: */ return atomic_dec_and_test(&pin_list->count) && diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 35b67c544a6a..5dbe1b273b71 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -648,7 +648,7 @@ static int bch2_journal_replay(struct bch_fs *c) move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr); keys->gap = keys->nr; - keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL); + keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL); if (!keys_sorted) return -BCH_ERR_ENOMEM_journal_replay; @@ -1403,7 +1403,7 @@ use_clean: } c->journal_replay_seq_start = last_seq; - c->journal_replay_seq_end = blacklist_seq - 1;; + c->journal_replay_seq_end = blacklist_seq - 1; if (c->opts.reconstruct_alloc) { c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c 
index a58b9750b6ce..405ea74d0b83 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -553,7 +553,9 @@ static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src) d = (src_f ? le32_to_cpu(src_f->u64s) : 0) - (dst_f ? le32_to_cpu(dst_f->u64s) : 0); if (d > 0) { - int ret = bch2_sb_realloc(dst_handle, le32_to_cpu(dst_handle->sb->u64s) + d); + int ret = bch2_sb_realloc(dst_handle, + le32_to_cpu(dst_handle->sb->u64s) + d); + if (ret) return ret; diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h index 904adea6a0da..6e59b0148f8d 100644 --- a/fs/bcachefs/super-io.h +++ b/fs/bcachefs/super-io.h @@ -58,6 +58,7 @@ struct bch_sb_field_ops { static inline __le64 bch2_sb_magic(struct bch_fs *c) { __le64 ret; + memcpy(&ret, &c->sb.uuid, sizeof(ret)); return ret; } diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c index 6374d8aa9afc..e4f21fcae944 100644 --- a/fs/bcachefs/util.c +++ b/fs/bcachefs/util.c @@ -216,6 +216,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[]) while ((p = strsep(&s, ","))) { int flag = match_string(list, -1, p); + if (flag < 0) { ret = -1; break; @@ -797,9 +798,10 @@ void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src) struct bvec_iter iter; __bio_for_each_segment(bv, dst, iter, dst_iter) { - void *dstp = kmap_atomic(bv.bv_page); + void *dstp = kmap_local_page(bv.bv_page); + memcpy(dstp + bv.bv_offset, src, bv.bv_len); - kunmap_atomic(dstp); + kunmap_local(dstp); src += bv.bv_len; } @@ -811,9 +813,10 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter) struct bvec_iter iter; __bio_for_each_segment(bv, src, iter, src_iter) { - void *srcp = kmap_atomic(bv.bv_page); + void *srcp = kmap_local_page(bv.bv_page); + memcpy(dst, srcp + bv.bv_offset, bv.bv_len); - kunmap_atomic(srcp); + kunmap_local(srcp); dst += bv.bv_len; } diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index 8e37ce01a728..3cec6171c58f 100644 --- a/fs/bcachefs/util.h +++ 
b/fs/bcachefs/util.h @@ -467,8 +467,10 @@ struct bch_pd_controller { s64 last_change; s64 last_target; - /* If true, the rate will not increase if bch2_ratelimit_delay() - * is not being called often enough. */ + /* + * If true, the rate will not increase if bch2_ratelimit_delay() + * is not being called often enough. + */ bool backpressure; }; @@ -604,6 +606,7 @@ static inline void __memcpy_u64s(void *dst, const void *src, { #ifdef CONFIG_X86_64 long d0, d1, d2; + asm volatile("rep ; movsq" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (u64s), "1" (dst), "2" (src) @@ -680,6 +683,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src, #ifdef CONFIG_X86_64 long d0, d1, d2; + asm volatile("std ;\n" "rep ; movsq\n" "cld ;\n" diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c index ef030fc02448..2a2ab86ed6e1 100644 --- a/fs/bcachefs/varint.c +++ b/fs/bcachefs/varint.c @@ -59,6 +59,7 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out) if (likely(bytes < 9)) { __le64 v_le = 0; + memcpy(&v_le, in, bytes); v = le64_to_cpu(v_le); v >>= bytes; |