author    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-16 10:53:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-16 10:53:01 -0800
commit    087a76d390cbb8c0d21ea0cb3672ab4a7bb76362 (patch)
tree      7c86752ba8d430c37618851ee2806b5b921299bd
parent    759b2656b259d10935647a92dbfae7fafee6a790 (diff)
parent    2939e1a86f758b55cdba73e29397dd3d94df13bc (diff)
Merge branch 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
 "Jeff Mahoney and Dave Sterba have a really nice set of cleanups in
  here, and Christoph pitched in corrections/improvements to make btrfs
  use proper helpers for bio walking instead of doing it by hand. There
  are some key fixes as well, including some long standing bugs that
  took forever to track down in btrfs_drop_extents and during balance"

* 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (77 commits)
  btrfs: limit async_work allocation and worker func duration
  Revert "Btrfs: adjust len of writes if following a preallocated extent"
  Btrfs: don't WARN() in btrfs_transaction_abort() for IO errors
  btrfs: opencode chunk locking, remove helpers
  btrfs: remove root parameter from transaction commit/end routines
  btrfs: split btrfs_wait_marked_extents into normal and tree log functions
  btrfs: take an fs_info directly when the root is not used otherwise
  btrfs: simplify btrfs_wait_cache_io prototype
  btrfs: convert extent-tree tracepoints to use fs_info
  btrfs: root->fs_info cleanup, access fs_info->delayed_root directly
  btrfs: root->fs_info cleanup, add fs_info convenience variables
  btrfs: root->fs_info cleanup, update_block_group{,flags}
  btrfs: root->fs_info cleanup, lock/unlock_chunks
  btrfs: root->fs_info cleanup, btrfs_calc_{trans,trunc}_metadata_size
  btrfs: pull node/sector/stripe sizes out of root and into fs_info
  btrfs: root->fs_info cleanup, io_ctl_init
  btrfs: root->fs_info cleanup, use fs_info->dev_root everywhere
  btrfs: struct reada_control.root -> reada_control.fs_info
  btrfs: struct btrfsic_state->root should be an fs_info
  btrfs: alloc_reserved_file_extent trace point should use extent_root
  ...
-rw-r--r--  fs/btrfs/async-thread.c  14
-rw-r--r--  fs/btrfs/async-thread.h  1
-rw-r--r--  fs/btrfs/backref.c  10
-rw-r--r--  fs/btrfs/check-integrity.c  103
-rw-r--r--  fs/btrfs/check-integrity.h  5
-rw-r--r--  fs/btrfs/compression.c  196
-rw-r--r--  fs/btrfs/compression.h  12
-rw-r--r--  fs/btrfs/ctree.c  495
-rw-r--r--  fs/btrfs/ctree.h  241
-rw-r--r--  fs/btrfs/delayed-inode.c  147
-rw-r--r--  fs/btrfs/delayed-inode.h  21
-rw-r--r--  fs/btrfs/delayed-ref.c  20
-rw-r--r--  fs/btrfs/delayed-ref.h  14
-rw-r--r--  fs/btrfs/dev-replace.c  68
-rw-r--r--  fs/btrfs/dev-replace.h  4
-rw-r--r--  fs/btrfs/dir-item.c  45
-rw-r--r--  fs/btrfs/disk-io.c  595
-rw-r--r--  fs/btrfs/disk-io.h  34
-rw-r--r--  fs/btrfs/export.c  10
-rw-r--r--  fs/btrfs/extent-tree.c  1551
-rw-r--r--  fs/btrfs/extent_io.c  112
-rw-r--r--  fs/btrfs/extent_io.h  17
-rw-r--r--  fs/btrfs/file-item.c  207
-rw-r--r--  fs/btrfs/file.c  249
-rw-r--r--  fs/btrfs/free-space-cache.c  164
-rw-r--r--  fs/btrfs/free-space-cache.h  12
-rw-r--r--  fs/btrfs/free-space-tree.c  44
-rw-r--r--  fs/btrfs/inode-item.c  11
-rw-r--r--  fs/btrfs/inode-map.c  22
-rw-r--r--  fs/btrfs/inode.c  910
-rw-r--r--  fs/btrfs/ioctl.c  603
-rw-r--r--  fs/btrfs/lzo.c  17
-rw-r--r--  fs/btrfs/ordered-data.c  38
-rw-r--r--  fs/btrfs/ordered-data.h  4
-rw-r--r--  fs/btrfs/print-tree.c  19
-rw-r--r--  fs/btrfs/print-tree.h  4
-rw-r--r--  fs/btrfs/props.c  5
-rw-r--r--  fs/btrfs/qgroup.c  299
-rw-r--r--  fs/btrfs/qgroup.h  64
-rw-r--r--  fs/btrfs/raid56.c  78
-rw-r--r--  fs/btrfs/raid56.h  8
-rw-r--r--  fs/btrfs/reada.c  62
-rw-r--r--  fs/btrfs/relocation.c  453
-rw-r--r--  fs/btrfs/root-tree.c  28
-rw-r--r--  fs/btrfs/scrub.c  181
-rw-r--r--  fs/btrfs/send.c  33
-rw-r--r--  fs/btrfs/super.c  138
-rw-r--r--  fs/btrfs/tests/btrfs-tests.c  13
-rw-r--r--  fs/btrfs/tests/btrfs-tests.h  4
-rw-r--r--  fs/btrfs/tests/extent-buffer-tests.c  7
-rw-r--r--  fs/btrfs/tests/extent-io-tests.c  7
-rw-r--r--  fs/btrfs/tests/free-space-tests.c  18
-rw-r--r--  fs/btrfs/tests/free-space-tree-tests.c  9
-rw-r--r--  fs/btrfs/tests/inode-tests.c  16
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c  11
-rw-r--r--  fs/btrfs/transaction.c  615
-rw-r--r--  fs/btrfs/transaction.h  29
-rw-r--r--  fs/btrfs/tree-log.c  202
-rw-r--r--  fs/btrfs/uuid-tree.c  23
-rw-r--r--  fs/btrfs/volumes.c  845
-rw-r--r--  fs/btrfs/volumes.h  70
-rw-r--r--  fs/btrfs/xattr.c  21
-rw-r--r--  fs/btrfs/zlib.c  16
-rw-r--r--  include/trace/events/btrfs.h  67
64 files changed, 4713 insertions, 4628 deletions
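
Most of the churn below follows one mechanical pattern from the shortlog above: helpers that only used their btrfs_root argument to reach root->fs_info (or the node/sector sizes, which this series moves into fs_info) now take a struct btrfs_fs_info * directly. A minimal sketch of the shape of the change — a simplified, hypothetical helper, not one of the kernel functions in the diff:

	/* before: root is only a stepping stone to fs_info */
	static int helper(struct btrfs_root *root, u64 bytenr)
	{
		return btrfs_num_copies(root->fs_info, bytenr,
					root->nodesize);
	}

	/* after: callers hand over fs_info, and the geometry lives there */
	static int helper(struct btrfs_fs_info *fs_info, u64 bytenr)
	{
		return btrfs_num_copies(fs_info, bytenr, fs_info->nodesize);
	}
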
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index e0f071f6b5a7..63d197724519 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -86,6 +86,20 @@ btrfs_work_owner(struct btrfs_work *work)
return work->wq->fs_info;
}
+bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
+{
+ /*
+ * We could compare wq->normal->pending with num_online_cpus()
+ * to support "thresh == NO_THRESHOLD" case, but it requires
+ * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
+ * postpone it until someone needs the support of that case.
+ */
+ if (wq->normal->thresh == NO_THRESHOLD)
+ return false;
+
+ return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
+}
+
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
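
The btrfs_workqueue_normal_congested() helper added above gives producers a cheap back-pressure check: once pending work exceeds twice the queue's thresh, callers should stop queueing and handle the work inline. A hypothetical caller, purely to illustrate the intended pattern (per the shortlog, the real user in this series is the "limit async_work allocation" change):

	static void queue_or_run(struct btrfs_fs_info *fs_info,
				 struct btrfs_work *work)
	{
		/* over 2x thresh: run synchronously instead of piling on */
		if (btrfs_workqueue_normal_congested(fs_info->delayed_workers)) {
			work->func(work);	/* hypothetical inline fallback */
			return;
		}
		btrfs_queue_work(fs_info->delayed_workers, work);
	}
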
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 8e52484cd461..1f9597355c9d 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -84,4 +84,5 @@ void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
+bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 85dc7ab8f89e..8299601a3549 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -788,8 +788,7 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
if (ref->key_for_search.type)
continue;
BUG_ON(!ref->wanted_disk_byte);
- eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
- 0);
+ eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@@ -1405,8 +1404,7 @@ again:
ref->level == 0) {
struct extent_buffer *eb;
- eb = read_tree_block(fs_info->extent_root,
- ref->parent, 0);
+ eb = read_tree_block(fs_info, ref->parent, 0);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
@@ -1829,7 +1827,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
}
btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
if (found_key->type == BTRFS_METADATA_ITEM_KEY)
- size = fs_info->extent_root->nodesize;
+ size = fs_info->nodesize;
else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
size = found_key->offset;
@@ -2058,7 +2056,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
out:
if (!search_commit_root) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
- btrfs_end_transaction(trans, fs_info->extent_root);
+ btrfs_end_transaction(trans);
} else {
up_read(&fs_info->commit_root_sem);
}
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 8e99251650b3..ab14c2e635ca 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -254,7 +254,7 @@ struct btrfsic_state {
struct list_head all_blocks_list;
struct btrfsic_block_hashtable block_hashtable;
struct btrfsic_block_link_hashtable block_link_hashtable;
- struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
u64 max_superblock_generation;
struct btrfsic_block *latest_superblock;
u32 metablock_size;
@@ -646,11 +646,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
static int btrfsic_process_superblock(struct btrfsic_state *state,
struct btrfs_fs_devices *fs_devices)
{
- int ret = 0;
+ struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_super_block *selected_super;
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
struct btrfsic_dev_state *selected_dev_state = NULL;
+ int ret = 0;
int pass;
BUG_ON(NULL == state);
@@ -716,9 +717,8 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
break;
}
- num_copies =
- btrfs_num_copies(state->root->fs_info,
- next_bytenr, state->metablock_size);
+ num_copies = btrfs_num_copies(fs_info, next_bytenr,
+ state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -783,6 +783,7 @@ static int btrfsic_process_superblock_dev_mirror(
struct btrfsic_dev_state **selected_dev_state,
struct btrfs_super_block *selected_super)
{
+ struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_super_block *super_tmp;
u64 dev_bytenr;
struct buffer_head *bh;
@@ -832,7 +833,7 @@ static int btrfsic_process_superblock_dev_mirror(
superblock_tmp->never_written = 0;
superblock_tmp->mirror_num = 1 + superblock_mirror_num;
if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- btrfs_info_in_rcu(device->dev_root->fs_info,
+ btrfs_info_in_rcu(fs_info,
"new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)",
superblock_bdev,
rcu_str_deref(device->name), dev_bytenr,
@@ -887,9 +888,8 @@ static int btrfsic_process_superblock_dev_mirror(
break;
}
- num_copies =
- btrfs_num_copies(state->root->fs_info,
- next_bytenr, state->metablock_size);
+ num_copies = btrfs_num_copies(fs_info, next_bytenr,
+ state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -1254,6 +1254,7 @@ static int btrfsic_create_link_to_next_block(
struct btrfs_disk_key *disk_key,
u64 parent_generation)
{
+ struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfsic_block *next_block = NULL;
int ret;
struct btrfsic_block_link *l;
@@ -1262,9 +1263,8 @@ static int btrfsic_create_link_to_next_block(
*next_blockp = NULL;
if (0 == *num_copiesp) {
- *num_copiesp =
- btrfs_num_copies(state->root->fs_info,
- next_bytenr, state->metablock_size);
+ *num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
+ state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, *num_copiesp);
@@ -1390,13 +1390,14 @@ static int btrfsic_handle_extent_data(
struct btrfsic_block_data_ctx *block_ctx,
u32 item_offset, int force_iodone_flag)
{
- int ret;
+ struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_file_extent_item file_extent_item;
u64 file_extent_item_offset;
u64 next_bytenr;
u64 num_bytes;
u64 generation;
struct btrfsic_block_link *l;
+ int ret;
file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
item_offset;
@@ -1456,9 +1457,8 @@ static int btrfsic_handle_extent_data(
else
chunk_len = num_bytes;
- num_copies =
- btrfs_num_copies(state->root->fs_info,
- next_bytenr, state->datablock_size);
+ num_copies = btrfs_num_copies(fs_info, next_bytenr,
+ state->datablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -1533,13 +1533,14 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
struct btrfsic_block_data_ctx *block_ctx_out,
int mirror_num)
{
+ struct btrfs_fs_info *fs_info = state->fs_info;
int ret;
u64 length;
struct btrfs_bio *multi = NULL;
struct btrfs_device *device;
length = len;
- ret = btrfs_map_block(state->root->fs_info, READ,
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
bytenr, &length, &multi, mirror_num);
if (ret) {
@@ -1731,6 +1732,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
char **datav, unsigned int num_pages)
{
+ struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_header *h;
u8 csum[BTRFS_CSUM_SIZE];
u32 crc = ~(u32)0;
@@ -1741,7 +1743,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
num_pages = state->metablock_size >> PAGE_SHIFT;
h = (struct btrfs_header *)datav[0];
- if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
+ if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
return 1;
for (i = 0; i < num_pages; i++) {
@@ -2202,6 +2204,7 @@ static int btrfsic_process_written_superblock(
struct btrfsic_block *const superblock,
struct btrfs_super_block *const super_hdr)
{
+ struct btrfs_fs_info *fs_info = state->fs_info;
int pass;
superblock->generation = btrfs_super_generation(super_hdr);
@@ -2275,9 +2278,8 @@ static int btrfsic_process_written_superblock(
break;
}
- num_copies =
- btrfs_num_copies(state->root->fs_info,
- next_bytenr, BTRFS_SUPER_INFO_SIZE);
+ num_copies = btrfs_num_copies(fs_info, next_bytenr,
+ BTRFS_SUPER_INFO_SIZE);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -2699,14 +2701,14 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
struct btrfsic_dev_state *dev_state,
u64 dev_bytenr)
{
+ struct btrfs_fs_info *fs_info = state->fs_info;
+ struct btrfsic_block_data_ctx block_ctx;
int num_copies;
int mirror_num;
- int ret;
- struct btrfsic_block_data_ctx block_ctx;
int match = 0;
+ int ret;
- num_copies = btrfs_num_copies(state->root->fs_info,
- bytenr, state->metablock_size);
+ num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
ret = btrfsic_map_block(state, bytenr, state->metablock_size,
@@ -2819,10 +2821,11 @@ static void __btrfsic_submit_bio(struct bio *bio)
* btrfsic_mount(), this might return NULL */
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
if (NULL != dev_state &&
- (bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
+ (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i;
u64 dev_bytenr;
u64 cur_bytenr;
+ struct bio_vec *bvec;
int bio_is_patched;
char **mapped_datav;
@@ -2840,32 +2843,23 @@ static void __btrfsic_submit_bio(struct bio *bio)
if (!mapped_datav)
goto leave;
cur_bytenr = dev_bytenr;
- for (i = 0; i < bio->bi_vcnt; i++) {
- BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
- mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
- if (!mapped_datav[i]) {
- while (i > 0) {
- i--;
- kunmap(bio->bi_io_vec[i].bv_page);
- }
- kfree(mapped_datav);
- goto leave;
- }
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ BUG_ON(bvec->bv_len != PAGE_SIZE);
+ mapped_datav[i] = kmap(bvec->bv_page);
+
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
- i, cur_bytenr, bio->bi_io_vec[i].bv_len,
- bio->bi_io_vec[i].bv_offset);
- cur_bytenr += bio->bi_io_vec[i].bv_len;
+ i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
+ cur_bytenr += bvec->bv_len;
}
btrfsic_process_written_block(dev_state, dev_bytenr,
mapped_datav, bio->bi_vcnt,
bio, &bio_is_patched,
NULL, bio->bi_opf);
- while (i > 0) {
- i--;
- kunmap(bio->bi_io_vec[i].bv_page);
- }
+ bio_for_each_segment_all(bvec, bio, i)
+ kunmap(bvec->bv_page);
kfree(mapped_datav);
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
@@ -2910,7 +2904,7 @@ int btrfsic_submit_bio_wait(struct bio *bio)
return submit_bio_wait(bio);
}
-int btrfsic_mount(struct btrfs_root *root,
+int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask)
{
@@ -2919,14 +2913,14 @@ int btrfsic_mount(struct btrfs_root *root,
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
- if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
+ if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
- root->nodesize, PAGE_SIZE);
+ fs_info->nodesize, PAGE_SIZE);
return -1;
}
- if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
+ if (fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) {
pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
- root->sectorsize, PAGE_SIZE);
+ fs_info->sectorsize, PAGE_SIZE);
return -1;
}
state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
@@ -2944,12 +2938,12 @@ int btrfsic_mount(struct btrfs_root *root,
btrfsic_is_initialized = 1;
}
mutex_lock(&btrfsic_mutex);
- state->root = root;
+ state->fs_info = fs_info;
state->print_mask = print_mask;
state->include_extent_data = including_extent_data;
state->csum_size = 0;
- state->metablock_size = root->nodesize;
- state->datablock_size = root->sectorsize;
+ state->metablock_size = fs_info->nodesize;
+ state->datablock_size = fs_info->sectorsize;
INIT_LIST_HEAD(&state->all_blocks_list);
btrfsic_block_hashtable_init(&state->block_hashtable);
btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -2982,7 +2976,7 @@ int btrfsic_mount(struct btrfs_root *root,
ret = btrfsic_process_superblock(state, fs_devices);
if (0 != ret) {
mutex_unlock(&btrfsic_mutex);
- btrfsic_unmount(root, fs_devices);
+ btrfsic_unmount(fs_devices);
return ret;
}
@@ -2995,8 +2989,7 @@ int btrfsic_mount(struct btrfs_root *root,
return 0;
}
-void btrfsic_unmount(struct btrfs_root *root,
- struct btrfs_fs_devices *fs_devices)
+void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
{
struct btrfsic_block *b_all, *tmp_all;
struct btrfsic_state *state;
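
The rewritten submit path above replaces open-coded indexing of bio->bi_io_vec with bio_for_each_segment_all(), one of the "proper helpers for bio walking" conversions called out in the pull message. The idiom in isolation — a sketch assuming a bio the caller fully owns, since the _all variant walks the underlying vector and must not be used on split bios:

	static void inspect_bio_pages(struct bio *bio)
	{
		struct bio_vec *bvec;
		int i;

		/* visits every bio_vec in bi_io_vec exactly once */
		bio_for_each_segment_all(bvec, bio, i) {
			char *kaddr = kmap(bvec->bv_page);

			/* ... examine kaddr + bvec->bv_offset for
			 * bvec->bv_len bytes ... */
			kunmap(bvec->bv_page);
		}
	}
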
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index f78dff1c7e86..2de58a99ee92 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -29,10 +29,9 @@ int btrfsic_submit_bio_wait(struct bio *bio);
#define btrfsic_submit_bio_wait submit_bio_wait
#endif
-int btrfsic_mount(struct btrfs_root *root,
+int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask);
-void btrfsic_unmount(struct btrfs_root *root,
- struct btrfs_fs_devices *fs_devices);
+void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
#endif
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d4d8b7e36b2f..7f390849343b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -81,17 +81,17 @@ struct compressed_bio {
u32 sums;
};
-static int btrfs_decompress_biovec(int type, struct page **pages_in,
- u64 disk_start, struct bio_vec *bvec,
- int vcnt, size_t srclen);
+static int btrfs_decompress_bio(int type, struct page **pages_in,
+ u64 disk_start, struct bio *orig_bio,
+ size_t srclen);
-static inline int compressed_bio_size(struct btrfs_root *root,
+static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
{
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
return sizeof(struct compressed_bio) +
- (DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
+ (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -120,7 +120,7 @@ static int check_compressed_csum(struct inode *inode,
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
- btrfs_csum_final(csum, (char *)&csum);
+ btrfs_csum_final(csum, (u8 *)&csum);
kunmap_atomic(kaddr);
if (csum != *cb_sum) {
@@ -175,11 +175,10 @@ static void end_compressed_bio_read(struct bio *bio)
/* ok, we're the last bio for this extent, lets start
* the decompression.
*/
- ret = btrfs_decompress_biovec(cb->compress_type,
+ ret = btrfs_decompress_bio(cb->compress_type,
cb->compressed_pages,
cb->start,
- cb->orig_bio->bi_io_vec,
- cb->orig_bio->bi_vcnt,
+ cb->orig_bio,
cb->compressed_len);
csum_failed:
if (ret)
@@ -329,8 +328,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct page **compressed_pages,
unsigned long nr_pages)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -342,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_SIZE - 1));
- cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+ cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
return -ENOMEM;
atomic_set(&cb->pending_bios, 0);
@@ -356,7 +355,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
cb->orig_bio = NULL;
cb->nr_pages = nr_pages;
- bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ bdev = fs_info->fs_devices->latest_bdev;
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
if (!bio) {
@@ -392,17 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
* freed before we're done setting it up
*/
atomic_inc(&cb->pending_bios);
- ret = btrfs_bio_wq_end_io(root->fs_info, bio,
- BTRFS_WQ_ENDIO_DATA);
+ ret = btrfs_bio_wq_end_io(fs_info, bio,
+ BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
- ret = btrfs_csum_one_bio(root, inode, bio,
- start, 1);
+ ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -418,7 +416,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_add_page(bio, page, PAGE_SIZE, 0);
}
if (bytes_left < PAGE_SIZE) {
- btrfs_info(BTRFS_I(inode)->root->fs_info,
+ btrfs_info(fs_info,
"bytes left %lu compress len %lu nr %lu",
bytes_left, cb->compressed_len, cb->nr_pages);
}
@@ -428,15 +426,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
}
bio_get(bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
+ ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
- ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+ ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -446,6 +444,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
return 0;
}
+static u64 bio_end_offset(struct bio *bio)
+{
+ struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+ return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
+}
+
static noinline int add_ra_bio_pages(struct inode *inode,
u64 compressed_end,
struct compressed_bio *cb)
@@ -464,8 +469,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
u64 end;
int misses = 0;
- page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
- last_offset = (page_offset(page) + PAGE_SIZE);
+ last_offset = bio_end_offset(cb->orig_bio);
em_tree = &BTRFS_I(inode)->extent_tree;
tree = &BTRFS_I(inode)->io_tree;
@@ -563,7 +567,6 @@ next:
*
* bio->bi_iter.bi_sector points to the compressed extent on disk
* bio->bi_io_vec points to all of the inode pages
- * bio->bi_vcnt is a count of pages
*
* After the compressed pages are read, we copy the bytes into the
* bio we were passed and then call the bio end_io calls
@@ -571,11 +574,10 @@ next:
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_io_tree *tree;
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long pg_index;
@@ -603,7 +605,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return -EIO;
compressed_len = em->block_len;
- cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+ cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
goto out;
@@ -620,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
free_extent_map(em);
em = NULL;
- cb->len = uncompressed_len;
+ cb->len = bio->bi_iter.bi_size;
cb->compressed_len = compressed_len;
cb->compress_type = extent_compress_type(bio_flags);
cb->orig_bio = bio;
@@ -631,7 +633,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!cb->compressed_pages)
goto fail1;
- bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ bdev = fs_info->fs_devices->latest_bdev;
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
@@ -648,8 +650,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
add_ra_bio_pages(inode, em_start + em_len, cb);
/* include any pages we added in add_ra-bio_pages */
- uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
- cb->len = uncompressed_len;
+ cb->len = bio->bi_iter.bi_size;
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
if (!comp_bio)
@@ -676,8 +677,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
PAGE_SIZE) {
bio_get(comp_bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
- BTRFS_WQ_ENDIO_DATA);
+ ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
+ BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
/*
@@ -689,14 +690,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
atomic_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = btrfs_lookup_bio_sums(root, inode,
- comp_bio, sums);
+ ret = btrfs_lookup_bio_sums(inode, comp_bio,
+ sums);
BUG_ON(ret); /* -ENOMEM */
}
sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
- root->sectorsize);
+ fs_info->sectorsize);
- ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
@@ -717,16 +718,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
bio_get(comp_bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
- BTRFS_WQ_ENDIO_DATA);
+ ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+ ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
@@ -959,9 +959,7 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
*
* disk_start is the starting logical offset of this array in the file
*
- * bvec is a bio_vec of pages from the file that we want to decompress into
- *
- * vcnt is the count of pages in the biovec
+ * orig_bio contains the pages from the file that we want to decompress into
*
* srclen is the number of bytes in pages_in
*
@@ -970,18 +968,18 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
* be contiguous. They all correspond to the range of bytes covered by
* the compressed extent.
*/
-static int btrfs_decompress_biovec(int type, struct page **pages_in,
- u64 disk_start, struct bio_vec *bvec,
- int vcnt, size_t srclen)
+static int btrfs_decompress_bio(int type, struct page **pages_in,
+ u64 disk_start, struct bio *orig_bio,
+ size_t srclen)
{
struct list_head *workspace;
int ret;
workspace = find_workspace(type);
- ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
- disk_start,
- bvec, vcnt, srclen);
+ ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
+ disk_start, orig_bio,
+ srclen);
free_workspace(type, workspace);
return ret;
}
@@ -1021,9 +1019,7 @@ void btrfs_exit_compress(void)
*/
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
- struct bio_vec *bvec, int vcnt,
- unsigned long *pg_index,
- unsigned long *pg_offset)
+ struct bio *bio)
{
unsigned long buf_offset;
unsigned long current_buf_start;
@@ -1031,13 +1027,13 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
- struct page *page_out = bvec[*pg_index].bv_page;
+ struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
/*
* start byte is the first byte of the page we're currently
* copying into relative to the start of the compressed data.
*/
- start_byte = page_offset(page_out) - disk_start;
+ start_byte = page_offset(bvec.bv_page) - disk_start;
/* we haven't yet hit data corresponding to this page */
if (total_out <= start_byte)
@@ -1057,80 +1053,46 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
/* copy bytes from the working buffer into the pages */
while (working_bytes > 0) {
- bytes = min(PAGE_SIZE - *pg_offset,
- PAGE_SIZE - buf_offset);
+ bytes = min_t(unsigned long, bvec.bv_len,
+ PAGE_SIZE - buf_offset);
bytes = min(bytes, working_bytes);
- kaddr = kmap_atomic(page_out);
- memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+
+ kaddr = kmap_atomic(bvec.bv_page);
+ memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
kunmap_atomic(kaddr);
- flush_dcache_page(page_out);
+ flush_dcache_page(bvec.bv_page);
- *pg_offset += bytes;
buf_offset += bytes;
working_bytes -= bytes;
current_buf_start += bytes;
/* check if we need to pick another page */
- if (*pg_offset == PAGE_SIZE) {
- (*pg_index)++;
- if (*pg_index >= vcnt)
- return 0;
+ bio_advance(bio, bytes);
+ if (!bio->bi_iter.bi_size)
+ return 0;
+ bvec = bio_iter_iovec(bio, bio->bi_iter);
- page_out = bvec[*pg_index].bv_page;
- *pg_offset = 0;
- start_byte = page_offset(page_out) - disk_start;
+ start_byte = page_offset(bvec.bv_page) - disk_start;
- /*
- * make sure our new page is covered by this
- * working buffer
- */
- if (total_out <= start_byte)
- return 1;
+ /*
+ * make sure our new page is covered by this
+ * working buffer
+ */
+ if (total_out <= start_byte)
+ return 1;
- /*
- * the next page in the biovec might not be adjacent
- * to the last page, but it might still be found
- * inside this working buffer. bump our offset pointer
- */
- if (total_out > start_byte &&
- current_buf_start < start_byte) {
- buf_offset = start_byte - buf_start;
- working_bytes = total_out - start_byte;
- current_buf_start = buf_start + buf_offset;
- }
+ /*
+ * the next page in the biovec might not be adjacent
+ * to the last page, but it might still be found
+ * inside this working buffer. bump our offset pointer
+ */
+ if (total_out > start_byte &&
+ current_buf_start < start_byte) {
+ buf_offset = start_byte - buf_start;
+ working_bytes = total_out - start_byte;
+ current_buf_start = buf_start + buf_offset;
}
}
return 1;
}
-
-/*
- * When uncompressing data, we need to make sure and zero any parts of
- * the biovec that were not filled in by the decompression code. pg_index
- * and pg_offset indicate the last page and the last offset of that page
- * that have been filled in. This will zero everything remaining in the
- * biovec.
- */
-void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
- unsigned long pg_index,
- unsigned long pg_offset)
-{
- while (pg_index < vcnt) {
- struct page *page = bvec[pg_index].bv_page;
- unsigned long off = bvec[pg_index].bv_offset;
- unsigned long len = bvec[pg_index].bv_len;
-
- if (pg_offset < off)
- pg_offset = off;
- if (pg_offset < off + len) {
- unsigned long bytes = off + len - pg_offset;
- char *kaddr;
-
- kaddr = kmap_atomic(page);
- memset(kaddr + pg_offset, 0, bytes);
- kunmap_atomic(kaddr);
- }
- pg_index++;
- pg_offset = 0;
- }
-}
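
The new btrfs_decompress_buf2page() drives the destination with the bio's own iterator instead of the old (pg_index, pg_offset) pair that callers had to thread through: bio_iter_iovec() materializes the current segment from bi_iter, and bio_advance() consumes bytes from it, so once bi_iter.bi_size reaches zero every destination page has been filled — which is also why the trailing btrfs_clear_biovec_end() zeroing pass above could be deleted. The core copy idiom, reduced to a standalone sketch (the disk_start/start_byte bookkeeping of the real function is dropped):

	static void copy_into_bio(struct bio *bio, const char *src, size_t len)
	{
		while (len && bio->bi_iter.bi_size) {
			/* current segment, computed by value from bi_iter */
			struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
			size_t bytes = min_t(size_t, len, bv.bv_len);
			char *kaddr = kmap_atomic(bv.bv_page);

			memcpy(kaddr + bv.bv_offset, src, bytes);
			kunmap_atomic(kaddr);
			flush_dcache_page(bv.bv_page);

			src += bytes;
			len -= bytes;
			/* consumes bytes: shrinks bi_size, moves the offset */
			bio_advance(bio, bytes);
		}
	}
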
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index f49d8b8c0f00..09879579fbc8 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -34,9 +34,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
- struct bio_vec *bvec, int vcnt,
- unsigned long *pg_index,
- unsigned long *pg_offset);
+ struct bio *bio);
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long len, u64 disk_start,
@@ -45,9 +43,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long nr_pages);
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
-void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
- unsigned long pg_index,
- unsigned long pg_offset);
enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
@@ -72,11 +67,10 @@ struct btrfs_compress_op {
unsigned long *total_out,
unsigned long max_out);
- int (*decompress_biovec)(struct list_head *workspace,
+ int (*decompress_bio)(struct list_head *workspace,
struct page **pages_in,
u64 disk_start,
- struct bio_vec *bvec,
- int vcnt,
+ struct bio *orig_bio,
size_t srclen);
int (*decompress)(struct list_head *workspace,
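
With decompress_biovec gone from struct btrfs_compress_op, a backend's decompress_bio implementation decompresses pages_in and pushes each finished chunk to btrfs_decompress_buf2page(), which (per the rewrite above) returns 0 once orig_bio has been fully consumed and 1 while more output is still wanted. A deliberately trivial pass-through "codec" — not part of btrfs, purely to show the contract under those assumptions:

	static int passthrough_decompress_bio(struct list_head *workspace,
					      struct page **pages_in,
					      u64 disk_start,
					      struct bio *orig_bio,
					      size_t srclen)
	{
		unsigned long total_out = 0;
		unsigned long pg_index = 0;
		int ret = 1;

		while (ret == 1 && total_out < srclen) {
			unsigned long buf_start = total_out;
			char *buf = kmap(pages_in[pg_index]);

			total_out += min_t(size_t, PAGE_SIZE,
					   srclen - total_out);
			/* 0 here means orig_bio is full: we are done */
			ret = btrfs_decompress_buf2page(buf, buf_start,
							total_out, disk_start,
							orig_bio);
			kunmap(pages_in[pg_index]);
			pg_index++;
		}
		return 0;
	}
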
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f6ba165d3f81..a426dc822d4d 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -32,10 +32,11 @@ static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *ins_key,
struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *dst,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *dst,
struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
@@ -212,21 +213,23 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
*/
static void add_root_to_dirty_list(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
!test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
return;
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
/* Want the extent tree to be the last on the list */
if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
list_move_tail(&root->dirty_list,
- &root->fs_info->dirty_cowonly_roots);
+ &fs_info->dirty_cowonly_roots);
else
list_move(&root->dirty_list,
- &root->fs_info->dirty_cowonly_roots);
+ &fs_info->dirty_cowonly_roots);
}
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
}
/*
@@ -239,13 +242,14 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
struct extent_buffer **cow_ret, u64 new_root_objectid)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cow;
int ret = 0;
int level;
struct btrfs_disk_key disk_key;
WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
- trans->transid != root->fs_info->running_transaction->transid);
+ trans->transid != fs_info->running_transaction->transid);
WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
trans->transid != root->last_trans);
@@ -260,7 +264,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
if (IS_ERR(cow))
return PTR_ERR(cow);
- copy_extent_buffer(cow, buf, 0, 0, cow->len);
+ copy_extent_buffer_full(cow, buf);
btrfs_set_header_bytenr(cow, cow->start);
btrfs_set_header_generation(cow, trans->transid);
btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
@@ -271,8 +275,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
else
btrfs_set_header_owner(cow, new_root_objectid);
- write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
- BTRFS_FSID_SIZE);
+ write_extent_buffer_fsid(cow, fs_info->fsid);
WARN_ON(btrfs_header_generation(buf) > trans->transid);
if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
@@ -978,6 +981,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
struct extent_buffer *cow,
int *last_ref)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 refs;
u64 owner;
u64 flags;
@@ -1002,14 +1006,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
*/
if (btrfs_block_can_be_shared(root, buf)) {
- ret = btrfs_lookup_extent_info(trans, root, buf->start,
+ ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
&refs, &flags);
if (ret)
return ret;
if (refs == 0) {
ret = -EROFS;
- btrfs_handle_fs_error(root->fs_info, ret, NULL);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
return ret;
}
} else {
@@ -1052,7 +1056,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (new_flags != 0) {
int level = btrfs_header_level(buf);
- ret = btrfs_set_disk_extent_flags(trans, root,
+ ret = btrfs_set_disk_extent_flags(trans, fs_info,
buf->start,
buf->len,
new_flags, level, 0);
@@ -1070,7 +1074,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_dec_ref(trans, root, buf, 1);
BUG_ON(ret); /* -ENOMEM */
}
- clean_tree_block(trans, root->fs_info, buf);
+ clean_tree_block(trans, fs_info, buf);
*last_ref = 1;
}
return 0;
@@ -1095,6 +1099,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
struct extent_buffer **cow_ret,
u64 search_start, u64 empty_size)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_disk_key disk_key;
struct extent_buffer *cow;
int level, ret;
@@ -1108,7 +1113,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(buf);
WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
- trans->transid != root->fs_info->running_transaction->transid);
+ trans->transid != fs_info->running_transaction->transid);
WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
trans->transid != root->last_trans);
@@ -1130,7 +1135,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
/* cow is set to blocking by btrfs_init_new_buffer */
- copy_extent_buffer(cow, buf, 0, 0, cow->len);
+ copy_extent_buffer_full(cow, buf);
btrfs_set_header_bytenr(cow, cow->start);
btrfs_set_header_generation(cow, trans->transid);
btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
@@ -1141,8 +1146,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_set_header_owner(cow, root->root_key.objectid);
- write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
- BTRFS_FSID_SIZE);
+ write_extent_buffer_fsid(cow, fs_info->fsid);
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
@@ -1174,7 +1178,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
add_root_to_dirty_list(root);
} else {
WARN_ON(trans->transid != btrfs_header_generation(parent));
- tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
+ tree_mod_log_insert_key(fs_info, parent, parent_slot,
MOD_LOG_KEY_REPLACE, GFP_NOFS);
btrfs_set_node_blockptr(parent, parent_slot,
cow->start);
@@ -1182,7 +1186,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
trans->transid);
btrfs_mark_buffer_dirty(parent);
if (last_ref) {
- ret = tree_mod_log_free_eb(root->fs_info, buf);
+ ret = tree_mod_log_free_eb(fs_info, buf);
if (ret) {
btrfs_abort_transaction(trans, ret);
return ret;
@@ -1359,8 +1363,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
BUG_ON(tm->slot != 0);
- eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
- eb->len);
+ eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
if (!eb_rewin) {
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
@@ -1388,7 +1391,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
btrfs_tree_read_lock(eb_rewin);
__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
- BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info));
return eb_rewin;
}
@@ -1403,6 +1406,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct tree_mod_elem *tm;
struct extent_buffer *eb = NULL;
struct extent_buffer *eb_root;
@@ -1412,7 +1416,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
u64 logical;
eb_root = btrfs_read_lock_root_node(root);
- tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
+ tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
if (!tm)
return eb_root;
@@ -1424,16 +1428,17 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
logical = eb_root->start;
}
- tm = tree_mod_log_search(root->fs_info, logical, time_seq);
+ tm = tree_mod_log_search(fs_info, logical, time_seq);
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- old = read_tree_block(root, logical, 0);
+ old = read_tree_block(fs_info, logical, 0);
if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
if (!IS_ERR(old))
free_extent_buffer(old);
- btrfs_warn(root->fs_info,
- "failed to read tree block %llu from get_old_root", logical);
+ btrfs_warn(fs_info,
+ "failed to read tree block %llu from get_old_root",
+ logical);
} else {
eb = btrfs_clone_extent_buffer(old);
free_extent_buffer(old);
@@ -1441,8 +1446,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
} else if (old_root) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- eb = alloc_dummy_extent_buffer(root->fs_info, logical,
- root->nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, logical);
} else {
btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
eb = btrfs_clone_extent_buffer(eb_root);
@@ -1462,10 +1466,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
btrfs_set_header_generation(eb, old_generation);
}
if (tm)
- __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
+ __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
else
WARN_ON(btrfs_header_level(eb) != 0);
- WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
+ WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
return eb;
}
@@ -1527,17 +1531,18 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 search_start;
int ret;
- if (trans->transaction != root->fs_info->running_transaction)
+ if (trans->transaction != fs_info->running_transaction)
WARN(1, KERN_CRIT "trans %llu running %llu\n",
trans->transid,
- root->fs_info->running_transaction->transid);
+ fs_info->running_transaction->transid);
- if (trans->transid != root->fs_info->generation)
+ if (trans->transid != fs_info->generation)
WARN(1, KERN_CRIT "trans %llu running %llu\n",
- trans->transid, root->fs_info->generation);
+ trans->transid, fs_info->generation);
if (!should_cow_block(trans, root, buf)) {
trans->dirty = true;
@@ -1614,6 +1619,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
int start_slot, u64 *last_ret,
struct btrfs_key *progress)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cur;
u64 blocknr;
u64 gen;
@@ -1632,11 +1638,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
parent_level = btrfs_header_level(parent);
- WARN_ON(trans->transaction != root->fs_info->running_transaction);
- WARN_ON(trans->transid != root->fs_info->generation);
+ WARN_ON(trans->transaction != fs_info->running_transaction);
+ WARN_ON(trans->transid != fs_info->generation);
parent_nritems = btrfs_header_nritems(parent);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
end_slot = parent_nritems - 1;
if (parent_nritems <= 1)
@@ -1670,14 +1676,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
continue;
}
- cur = btrfs_find_tree_block(root->fs_info, blocknr);
+ cur = find_extent_buffer(fs_info, blocknr);
if (cur)
uptodate = btrfs_buffer_uptodate(cur, gen, 0);
else
uptodate = 0;
if (!cur || !uptodate) {
if (!cur) {
- cur = read_tree_block(root, blocknr, gen);
+ cur = read_tree_block(fs_info, blocknr, gen);
if (IS_ERR(cur)) {
return PTR_ERR(cur);
} else if (!extent_buffer_uptodate(cur)) {
@@ -1715,7 +1721,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
return err;
}
-
/*
* search for key in the extent_buffer. The items start at offset p,
* and they are item_size apart. There are 'max' items in p.
@@ -1839,8 +1844,9 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
/* given a node and slot number, this reads the blocks it points to. The
* extent buffer is returned with a reference taken (but unlocked).
*/
-static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
- struct extent_buffer *parent, int slot)
+static noinline struct extent_buffer *
+read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
+ int slot)
{
int level = btrfs_header_level(parent);
struct extent_buffer *eb;
@@ -1850,7 +1856,7 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
BUG_ON(level == 0);
- eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
+ eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
btrfs_node_ptr_generation(parent, slot));
if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
@@ -1869,6 +1875,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = NULL;
struct extent_buffer *mid;
struct extent_buffer *left = NULL;
@@ -1906,10 +1913,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
/* promote the child to a root */
- child = read_node_slot(root, mid, 0);
+ child = read_node_slot(fs_info, mid, 0);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
- btrfs_handle_fs_error(root->fs_info, ret, NULL);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
goto enospc;
}
@@ -1930,7 +1937,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
path->locks[level] = 0;
path->nodes[level] = NULL;
- clean_tree_block(trans, root->fs_info, mid);
+ clean_tree_block(trans, fs_info, mid);
btrfs_tree_unlock(mid);
/* once for the path */
free_extent_buffer(mid);
@@ -1942,10 +1949,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
}
if (btrfs_header_nritems(mid) >
- BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
return 0;
- left = read_node_slot(root, parent, pslot - 1);
+ left = read_node_slot(fs_info, parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
@@ -1960,7 +1967,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
}
- right = read_node_slot(root, parent, pslot + 1);
+ right = read_node_slot(fs_info, parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
@@ -1978,7 +1985,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* first, try to make some room in the middle buffer */
if (left) {
orig_slot += btrfs_header_nritems(left);
- wret = push_node_left(trans, root, left, mid, 1);
+ wret = push_node_left(trans, fs_info, left, mid, 1);
if (wret < 0)
ret = wret;
}
@@ -1987,11 +1994,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
* then try to empty the right most buffer into the middle
*/
if (right) {
- wret = push_node_left(trans, root, mid, right, 1);
+ wret = push_node_left(trans, fs_info, mid, right, 1);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (btrfs_header_nritems(right) == 0) {
- clean_tree_block(trans, root->fs_info, right);
+ clean_tree_block(trans, fs_info, right);
btrfs_tree_unlock(right);
del_ptr(root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
@@ -2001,7 +2008,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
} else {
struct btrfs_disk_key right_key;
btrfs_node_key(right, &right_key, 0);
- tree_mod_log_set_node_key(root->fs_info, parent,
+ tree_mod_log_set_node_key(fs_info, parent,
pslot + 1, 0);
btrfs_set_node_key(parent, &right_key, pslot + 1);
btrfs_mark_buffer_dirty(parent);
@@ -2019,23 +2026,23 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
*/
if (!left) {
ret = -EROFS;
- btrfs_handle_fs_error(root->fs_info, ret, NULL);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
goto enospc;
}
- wret = balance_node_right(trans, root, mid, left);
+ wret = balance_node_right(trans, fs_info, mid, left);
if (wret < 0) {
ret = wret;
goto enospc;
}
if (wret == 1) {
- wret = push_node_left(trans, root, left, mid, 1);
+ wret = push_node_left(trans, fs_info, left, mid, 1);
if (wret < 0)
ret = wret;
}
BUG_ON(wret == 1);
}
if (btrfs_header_nritems(mid) == 0) {
- clean_tree_block(trans, root->fs_info, mid);
+ clean_tree_block(trans, fs_info, mid);
btrfs_tree_unlock(mid);
del_ptr(root, path, level + 1, pslot);
root_sub_used(root, mid->len);
@@ -2046,8 +2053,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* update the parent key to reflect our changes */
struct btrfs_disk_key mid_key;
btrfs_node_key(mid, &mid_key, 0);
- tree_mod_log_set_node_key(root->fs_info, parent,
- pslot, 0);
+ tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
btrfs_set_node_key(parent, &mid_key, pslot);
btrfs_mark_buffer_dirty(parent);
}
@@ -2094,6 +2100,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = NULL;
struct extent_buffer *mid;
struct extent_buffer *left = NULL;
@@ -2117,7 +2124,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (!parent)
return 1;
- left = read_node_slot(root, parent, pslot - 1);
+ left = read_node_slot(fs_info, parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
@@ -2129,7 +2136,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_set_lock_blocking(left);
left_nr = btrfs_header_nritems(left);
- if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
+ if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
wret = 1;
} else {
ret = btrfs_cow_block(trans, root, left, parent,
@@ -2137,7 +2144,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (ret)
wret = 1;
else {
- wret = push_node_left(trans, root,
+ wret = push_node_left(trans, fs_info,
left, mid, 0);
}
}
@@ -2147,8 +2154,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
orig_slot += left_nr;
btrfs_node_key(mid, &disk_key, 0);
- tree_mod_log_set_node_key(root->fs_info, parent,
- pslot, 0);
+ tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
btrfs_set_node_key(parent, &disk_key, pslot);
btrfs_mark_buffer_dirty(parent);
if (btrfs_header_nritems(left) > orig_slot) {
@@ -2169,7 +2175,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(left);
free_extent_buffer(left);
}
- right = read_node_slot(root, parent, pslot + 1);
+ right = read_node_slot(fs_info, parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
@@ -2183,7 +2189,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_set_lock_blocking(right);
right_nr = btrfs_header_nritems(right);
- if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
+ if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
wret = 1;
} else {
ret = btrfs_cow_block(trans, root, right,
@@ -2192,7 +2198,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (ret)
wret = 1;
else {
- wret = balance_node_right(trans, root,
+ wret = balance_node_right(trans, fs_info,
right, mid);
}
}
@@ -2202,7 +2208,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
btrfs_node_key(right, &disk_key, 0);
- tree_mod_log_set_node_key(root->fs_info, parent,
+ tree_mod_log_set_node_key(fs_info, parent,
pslot + 1, 0);
btrfs_set_node_key(parent, &disk_key, pslot + 1);
btrfs_mark_buffer_dirty(parent);
@@ -2230,7 +2236,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
* readahead one full node of leaves, finding things that are close
* to the block in 'slot', and triggering ra on them.
*/
-static void reada_for_search(struct btrfs_root *root,
+static void reada_for_search(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int level, int slot, u64 objectid)
{
@@ -2254,8 +2260,8 @@ static void reada_for_search(struct btrfs_root *root,
node = path->nodes[level];
search = btrfs_node_blockptr(node, slot);
- blocksize = root->nodesize;
- eb = btrfs_find_tree_block(root->fs_info, search);
+ blocksize = fs_info->nodesize;
+ eb = find_extent_buffer(fs_info, search);
if (eb) {
free_extent_buffer(eb);
return;
@@ -2284,7 +2290,7 @@ static void reada_for_search(struct btrfs_root *root,
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
- readahead_tree_block(root, search);
+ readahead_tree_block(fs_info, search);
nread += blocksize;
}
nscan++;
@@ -2293,7 +2299,7 @@ static void reada_for_search(struct btrfs_root *root,
}
}
-static noinline void reada_for_balance(struct btrfs_root *root,
+static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, int level)
{
int slot;
@@ -2314,7 +2320,7 @@ static noinline void reada_for_balance(struct btrfs_root *root,
if (slot > 0) {
block1 = btrfs_node_blockptr(parent, slot - 1);
gen = btrfs_node_ptr_generation(parent, slot - 1);
- eb = btrfs_find_tree_block(root->fs_info, block1);
+ eb = find_extent_buffer(fs_info, block1);
/*
* if we get -eagain from btrfs_buffer_uptodate, we
* don't want to return eagain here. That will loop
@@ -2327,16 +2333,16 @@ static noinline void reada_for_balance(struct btrfs_root *root,
if (slot + 1 < nritems) {
block2 = btrfs_node_blockptr(parent, slot + 1);
gen = btrfs_node_ptr_generation(parent, slot + 1);
- eb = btrfs_find_tree_block(root->fs_info, block2);
+ eb = find_extent_buffer(fs_info, block2);
if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
block2 = 0;
free_extent_buffer(eb);
}
if (block1)
- readahead_tree_block(root, block1);
+ readahead_tree_block(fs_info, block1);
if (block2)
- readahead_tree_block(root, block2);
+ readahead_tree_block(fs_info, block2);
}
@@ -2436,6 +2442,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
struct extent_buffer **eb_ret, int level, int slot,
struct btrfs_key *key, u64 time_seq)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 blocknr;
u64 gen;
struct extent_buffer *b = *eb_ret;
@@ -2445,7 +2452,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
blocknr = btrfs_node_blockptr(b, slot);
gen = btrfs_node_ptr_generation(b, slot);
- tmp = btrfs_find_tree_block(root->fs_info, blocknr);
+ tmp = find_extent_buffer(fs_info, blocknr);
if (tmp) {
/* first we do an atomic uptodate check */
if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
@@ -2484,12 +2491,12 @@ read_block_for_search(struct btrfs_trans_handle *trans,
free_extent_buffer(tmp);
if (p->reada != READA_NONE)
- reada_for_search(root, p, level, slot, key->objectid);
+ reada_for_search(fs_info, p, level, slot, key->objectid);
btrfs_release_path(p);
ret = -EAGAIN;
- tmp = read_tree_block(root, blocknr, 0);
+ tmp = read_tree_block(fs_info, blocknr, 0);
if (!IS_ERR(tmp)) {
/*
* If the read above didn't mark this buffer up to date,
@@ -2521,9 +2528,11 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
struct extent_buffer *b, int level, int ins_len,
int *write_lock_level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
+
if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
- BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
int sret;
if (*write_lock_level < level + 1) {
@@ -2533,7 +2542,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
btrfs_set_path_blocking(p);
- reada_for_balance(root, p, level);
+ reada_for_balance(fs_info, p, level);
sret = split_node(trans, root, p, level);
btrfs_clear_path_blocking(p, NULL, 0);
@@ -2544,7 +2553,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
b = p->nodes[level];
} else if (ins_len < 0 && btrfs_header_nritems(b) <
- BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
int sret;
if (*write_lock_level < level + 1) {
@@ -2554,7 +2563,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
btrfs_set_path_blocking(p);
- reada_for_balance(root, p, level);
+ reada_for_balance(fs_info, p, level);
sret = balance_level(trans, root, p, level);
btrfs_clear_path_blocking(p, NULL, 0);
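
The two thresholds above bracket node occupancy in terms of the fs_info-based fanout helper: when inserting (ins_len > 0) a node is pre-split once fewer than three free slots remain, and when deleting (ins_len < 0) it is rebalanced once it drops under half full. Condensed from the code above (illustrative restatement):

/*
 * ins_len > 0: split if nritems >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3
 * ins_len < 0: balance if nritems < BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2
 */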
@@ -2663,6 +2672,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_path *p, int
ins_len, int cow)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *b;
int slot;
int ret;
@@ -2718,12 +2728,12 @@ again:
* so we always do read locks
*/
if (p->need_commit_sem)
- down_read(&root->fs_info->commit_root_sem);
+ down_read(&fs_info->commit_root_sem);
b = root->commit_root;
extent_buffer_get(b);
level = btrfs_header_level(b);
if (p->need_commit_sem)
- up_read(&root->fs_info->commit_root_sem);
+ up_read(&fs_info->commit_root_sem);
if (!p->skip_locking)
btrfs_tree_read_lock(b);
} else {
@@ -2895,7 +2905,7 @@ cow_done:
} else {
p->slots[level] = slot;
if (ins_len > 0 &&
- btrfs_leaf_free_space(root, b) < ins_len) {
+ btrfs_leaf_free_space(fs_info, b) < ins_len) {
if (write_lock_level < 1) {
write_lock_level = 1;
btrfs_release_path(p);
@@ -2946,6 +2956,7 @@ done:
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
struct btrfs_path *p, u64 time_seq)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *b;
int slot;
int ret;
@@ -3020,7 +3031,7 @@ again:
btrfs_clear_path_blocking(p, b,
BTRFS_READ_LOCK);
}
- b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
+ b = tree_mod_log_rewind(fs_info, p, b, time_seq);
if (!b) {
ret = -ENOMEM;
goto done;
@@ -3187,7 +3198,8 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
* error, and > 0 if there was no room in the left hand block.
*/
static int push_node_left(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *dst,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *dst,
struct extent_buffer *src, int empty)
{
int push_items = 0;
@@ -3197,7 +3209,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
src_nritems = btrfs_header_nritems(src);
dst_nritems = btrfs_header_nritems(dst);
- push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
+ push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
WARN_ON(btrfs_header_generation(src) != trans->transid);
WARN_ON(btrfs_header_generation(dst) != trans->transid);
@@ -3222,7 +3234,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
} else
push_items = min(src_nritems - 8, push_items);
- ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+ ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
push_items);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -3261,7 +3273,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
* this will only push up to 1/2 the contents of the left node over
*/
static int balance_node_right(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct extent_buffer *dst,
struct extent_buffer *src)
{
@@ -3276,7 +3288,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
src_nritems = btrfs_header_nritems(src);
dst_nritems = btrfs_header_nritems(dst);
- push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
+ push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
if (push_items <= 0)
return 1;
@@ -3291,13 +3303,13 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
if (max_push < push_items)
push_items = max_push;
- tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
+ tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
btrfs_node_key_ptr_offset(0),
(dst_nritems) *
sizeof(struct btrfs_key_ptr));
- ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+ ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
src_nritems - push_items, push_items);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -3328,6 +3340,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 lower_gen;
struct extent_buffer *lower;
struct extent_buffer *c;
@@ -3348,9 +3361,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
if (IS_ERR(c))
return PTR_ERR(c);
- root_add_used(root, root->nodesize);
+ root_add_used(root, fs_info->nodesize);
- memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
btrfs_set_header_nritems(c, 1);
btrfs_set_header_level(c, level);
btrfs_set_header_bytenr(c, c->start);
@@ -3358,11 +3371,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(c, root->root_key.objectid);
- write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
- BTRFS_FSID_SIZE);
-
- write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
- btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
+ write_extent_buffer_fsid(c, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
btrfs_set_node_key(c, &lower_key, 0);
btrfs_set_node_blockptr(c, 0, lower->start);
@@ -3396,7 +3406,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
* blocknr is the block the key points to.
*/
static void insert_ptr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_fs_info *fs_info, struct btrfs_path *path,
struct btrfs_disk_key *key, u64 bytenr,
int slot, int level)
{
@@ -3409,10 +3419,10 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
lower = path->nodes[level];
nritems = btrfs_header_nritems(lower);
BUG_ON(slot > nritems);
- BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
+ BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
if (slot != nritems) {
if (level)
- tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
+ tree_mod_log_eb_move(fs_info, lower, slot + 1,
slot, nritems - slot);
memmove_extent_buffer(lower,
btrfs_node_key_ptr_offset(slot + 1),
@@ -3420,7 +3430,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
(nritems - slot) * sizeof(struct btrfs_key_ptr));
}
if (level) {
- ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
+ ret = tree_mod_log_insert_key(fs_info, lower, slot,
MOD_LOG_KEY_ADD, GFP_NOFS);
BUG_ON(ret < 0);
}
@@ -3445,6 +3455,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *c;
struct extent_buffer *split;
struct btrfs_disk_key disk_key;
@@ -3472,7 +3483,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
ret = push_nodes_for_insert(trans, root, path, level);
c = path->nodes[level];
if (!ret && btrfs_header_nritems(c) <
- BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
return 0;
if (ret < 0)
return ret;
@@ -3487,22 +3498,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
if (IS_ERR(split))
return PTR_ERR(split);
- root_add_used(root, root->nodesize);
+ root_add_used(root, fs_info->nodesize);
- memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
btrfs_set_header_level(split, btrfs_header_level(c));
btrfs_set_header_bytenr(split, split->start);
btrfs_set_header_generation(split, trans->transid);
btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(split, root->root_key.objectid);
- write_extent_buffer(split, root->fs_info->fsid,
- btrfs_header_fsid(), BTRFS_FSID_SIZE);
- write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
- btrfs_header_chunk_tree_uuid(split),
- BTRFS_UUID_SIZE);
-
- ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
- mid, c_nritems - mid);
+ write_extent_buffer_fsid(split, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
+
+ ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
if (ret) {
btrfs_abort_transaction(trans, ret);
return ret;
@@ -3518,7 +3525,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
- insert_ptr(trans, root, path, &disk_key, split->start,
+ insert_ptr(trans, fs_info, path, &disk_key, split->start,
path->slots[level + 1] + 1, level + 1);
if (path->slots[level] >= mid) {
@@ -3566,17 +3573,19 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
* the start of the leaf data. IOW, how much room
* the leaf has left for both items and data
*/
-noinline int btrfs_leaf_free_space(struct btrfs_root *root,
+noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
int nritems = btrfs_header_nritems(leaf);
int ret;
- ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
+
+ ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
if (ret < 0) {
- btrfs_crit(root->fs_info,
- "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
- ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
- leaf_space_used(leaf, 0, nritems), nritems);
+ btrfs_crit(fs_info,
+ "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
+ ret,
+ (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
+ leaf_space_used(leaf, 0, nritems), nritems);
}
return ret;
}
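
btrfs_leaf_free_space() computes the slack between the two ends of the leaf: the fixed data area minus what the item headers and their payloads already consume; a negative result indicates a corrupt leaf, hence the btrfs_crit(). A hedged sketch of a typical check, using only the helper as converted above (leaf_has_room() is an illustrative wrapper, not from this patch):

static bool leaf_has_room(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *leaf, u32 bytes)
{
	/* free = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems) */
	return btrfs_leaf_free_space(fs_info, leaf) >= (int)bytes;
}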
@@ -3586,7 +3595,7 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
* right. We'll push up to and including min_slot, but no lower
*/
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int data_size, int empty,
struct extent_buffer *right,
@@ -3626,7 +3635,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (path->slots[0] > i)
break;
if (path->slots[0] == i) {
- int space = btrfs_leaf_free_space(root, left);
+ int space = btrfs_leaf_free_space(fs_info, left);
if (space + push_space * 2 > free_space)
break;
}
@@ -3655,19 +3664,19 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
right_nritems = btrfs_header_nritems(right);
push_space = btrfs_item_end_nr(left, left_nritems - push_items);
- push_space -= leaf_data_end(root, left);
+ push_space -= leaf_data_end(fs_info, left);
/* make room in the right data area */
- data_end = leaf_data_end(root, right);
+ data_end = leaf_data_end(fs_info, right);
memmove_extent_buffer(right,
btrfs_leaf_data(right) + data_end - push_space,
btrfs_leaf_data(right) + data_end,
- BTRFS_LEAF_DATA_SIZE(root) - data_end);
+ BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
/* copy from the left data area */
copy_extent_buffer(right, left, btrfs_leaf_data(right) +
- BTRFS_LEAF_DATA_SIZE(root) - push_space,
- btrfs_leaf_data(left) + leaf_data_end(root, left),
+ BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
+ btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
@@ -3682,7 +3691,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
/* update the item pointers */
right_nritems += push_items;
btrfs_set_header_nritems(right, right_nritems);
- push_space = BTRFS_LEAF_DATA_SIZE(root);
+ push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(i);
push_space -= btrfs_token_item_size(right, item, &token);
@@ -3695,7 +3704,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (left_nritems)
btrfs_mark_buffer_dirty(left);
else
- clean_tree_block(trans, root->fs_info, left);
+ clean_tree_block(trans, fs_info, left);
btrfs_mark_buffer_dirty(right);
@@ -3707,7 +3716,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (path->slots[0] >= left_nritems) {
path->slots[0] -= left_nritems;
if (btrfs_header_nritems(path->nodes[0]) == 0)
- clean_tree_block(trans, root->fs_info, path->nodes[0]);
+ clean_tree_block(trans, fs_info, path->nodes[0]);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -3739,6 +3748,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
int min_data_size, int data_size,
int empty, u32 min_slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *right;
struct extent_buffer *upper;
@@ -3757,7 +3767,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
- right = read_node_slot(root, upper, slot + 1);
+ right = read_node_slot(fs_info, upper, slot + 1);
/*
* if slot + 1 is not valid or we failed to read the right node,
* it's no big deal, just return.
@@ -3768,7 +3778,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_tree_lock(right);
btrfs_set_lock_blocking(right);
- free_space = btrfs_leaf_free_space(root, right);
+ free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
goto out_unlock;
@@ -3778,7 +3788,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (ret)
goto out_unlock;
- free_space = btrfs_leaf_free_space(root, right);
+ free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
goto out_unlock;
@@ -3799,7 +3809,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
return 0;
}
- return __push_leaf_right(trans, root, path, min_data_size, empty,
+ return __push_leaf_right(trans, fs_info, path, min_data_size, empty,
right, free_space, left_nritems, min_slot);
out_unlock:
btrfs_tree_unlock(right);
@@ -3816,7 +3826,7 @@ out_unlock:
* items
*/
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path, int data_size,
int empty, struct extent_buffer *left,
int free_space, u32 right_nritems,
@@ -3849,7 +3859,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (path->slots[0] < i)
break;
if (path->slots[0] == i) {
- int space = btrfs_leaf_free_space(root, right);
+ int space = btrfs_leaf_free_space(fs_info, right);
if (space + push_space * 2 > free_space)
break;
}
@@ -3878,11 +3888,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
btrfs_item_nr_offset(0),
push_items * sizeof(struct btrfs_item));
- push_space = BTRFS_LEAF_DATA_SIZE(root) -
+ push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
btrfs_item_offset_nr(right, push_items - 1);
copy_extent_buffer(left, right, btrfs_leaf_data(left) +
- leaf_data_end(root, left) - push_space,
+ leaf_data_end(fs_info, left) - push_space,
btrfs_leaf_data(right) +
btrfs_item_offset_nr(right, push_items - 1),
push_space);
@@ -3897,7 +3907,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
ioff = btrfs_token_item_offset(left, item, &token);
btrfs_set_token_item_offset(left, item,
- ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
+ ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
&token);
}
btrfs_set_header_nritems(left, old_left_nritems + push_items);
@@ -3909,11 +3919,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (push_items < right_nritems) {
push_space = btrfs_item_offset_nr(right, push_items - 1) -
- leaf_data_end(root, right);
+ leaf_data_end(fs_info, right);
memmove_extent_buffer(right, btrfs_leaf_data(right) +
- BTRFS_LEAF_DATA_SIZE(root) - push_space,
+ BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
btrfs_leaf_data(right) +
- leaf_data_end(root, right), push_space);
+ leaf_data_end(fs_info, right), push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(0),
btrfs_item_nr_offset(push_items),
@@ -3922,7 +3932,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
}
right_nritems -= push_items;
btrfs_set_header_nritems(right, right_nritems);
- push_space = BTRFS_LEAF_DATA_SIZE(root);
+ push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(i);
@@ -3935,10 +3945,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (right_nritems)
btrfs_mark_buffer_dirty(right);
else
- clean_tree_block(trans, root->fs_info, right);
+ clean_tree_block(trans, fs_info, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(root->fs_info, path, &disk_key, 1);
+ fixup_low_keys(fs_info, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@@ -3972,6 +3982,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path, int min_data_size,
int data_size, int empty, u32 max_slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = path->nodes[0];
struct extent_buffer *left;
int slot;
@@ -3991,7 +4002,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
- left = read_node_slot(root, path->nodes[1], slot - 1);
+ left = read_node_slot(fs_info, path->nodes[1], slot - 1);
/*
* if slot - 1 is not valid or we failed to read the left node,
* it's no big deal, just return.
@@ -4002,7 +4013,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_tree_lock(left);
btrfs_set_lock_blocking(left);
- free_space = btrfs_leaf_free_space(root, left);
+ free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
ret = 1;
goto out;
@@ -4018,13 +4029,13 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
goto out;
}
- free_space = btrfs_leaf_free_space(root, left);
+ free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
ret = 1;
goto out;
}
- return __push_leaf_left(trans, root, path, min_data_size,
+ return __push_leaf_left(trans, fs_info, path, min_data_size,
empty, left, free_space, right_nritems,
max_slot);
out:
@@ -4038,7 +4049,7 @@ out:
* available for the resulting leaf level of the path.
*/
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct extent_buffer *l,
struct extent_buffer *right,
@@ -4054,19 +4065,18 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
- data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
+ data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
btrfs_item_nr_offset(mid),
nritems * sizeof(struct btrfs_item));
copy_extent_buffer(right, l,
- btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
+ btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
data_copy_size, btrfs_leaf_data(l) +
- leaf_data_end(root, l), data_copy_size);
+ leaf_data_end(fs_info, l), data_copy_size);
- rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
- btrfs_item_end_nr(l, mid);
+ rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
for (i = 0; i < nritems; i++) {
struct btrfs_item *item = btrfs_item_nr(i);
@@ -4079,7 +4089,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(l, mid);
btrfs_item_key(right, &disk_key, 0);
- insert_ptr(trans, root, path, &disk_key, right->start,
+ insert_ptr(trans, fs_info, path, &disk_key, right->start,
path->slots[1] + 1, 1);
btrfs_mark_buffer_dirty(right);
@@ -4115,6 +4125,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
int data_size)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int progress = 0;
int slot;
@@ -4123,7 +4134,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
slot = path->slots[0];
if (slot < btrfs_header_nritems(path->nodes[0]))
- space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
+ space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
/*
* try to push all the items after our slot into the
@@ -4144,7 +4155,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
if (path->slots[0] == 0 || path->slots[0] == nritems)
return 0;
- if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+ if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
return 0;
/* try to push all the items before our slot into the next leaf */
@@ -4189,7 +4200,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
l = path->nodes[0];
slot = path->slots[0];
if (extend && data_size + btrfs_item_size_nr(l, slot) +
- sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
+ sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
return -EOVERFLOW;
/* first try to make some room by pushing left and right */
@@ -4197,7 +4208,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
int space_needed = data_size;
if (slot < btrfs_header_nritems(l))
- space_needed -= btrfs_leaf_free_space(root, l);
+ space_needed -= btrfs_leaf_free_space(fs_info, l);
wret = push_leaf_right(trans, root, path, space_needed,
space_needed, 0, 0);
@@ -4212,7 +4223,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
l = path->nodes[0];
/* did the pushes work? */
- if (btrfs_leaf_free_space(root, l) >= data_size)
+ if (btrfs_leaf_free_space(fs_info, l) >= data_size)
return 0;
}
@@ -4231,14 +4242,14 @@ again:
if (mid <= slot) {
if (nritems == 1 ||
leaf_space_used(l, mid, nritems - mid) + data_size >
- BTRFS_LEAF_DATA_SIZE(root)) {
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
if (slot >= nritems) {
split = 0;
} else {
mid = slot;
if (mid != nritems &&
leaf_space_used(l, mid, nritems - mid) +
- data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+ data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
if (data_size && !tried_avoid_double)
goto push_for_double;
split = 2;
@@ -4247,7 +4258,7 @@ again:
}
} else {
if (leaf_space_used(l, 0, mid) + data_size >
- BTRFS_LEAF_DATA_SIZE(root)) {
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
if (!extend && data_size && slot == 0) {
split = 0;
} else if ((extend || !data_size) && slot == 0) {
@@ -4256,7 +4267,7 @@ again:
mid = slot;
if (mid != nritems &&
leaf_space_used(l, mid, nritems - mid) +
- data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+ data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
if (data_size && !tried_avoid_double)
goto push_for_double;
split = 2;
@@ -4275,26 +4286,22 @@ again:
if (IS_ERR(right))
return PTR_ERR(right);
- root_add_used(root, root->nodesize);
+ root_add_used(root, fs_info->nodesize);
- memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(right, right->start);
btrfs_set_header_generation(right, trans->transid);
btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(right, root->root_key.objectid);
btrfs_set_header_level(right, 0);
- write_extent_buffer(right, fs_info->fsid,
- btrfs_header_fsid(), BTRFS_FSID_SIZE);
-
- write_extent_buffer(right, fs_info->chunk_tree_uuid,
- btrfs_header_chunk_tree_uuid(right),
- BTRFS_UUID_SIZE);
+ write_extent_buffer_fsid(right, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
- insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1] + 1, 1);
+ insert_ptr(trans, fs_info, path, &disk_key,
+ right->start, path->slots[1] + 1, 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -4302,8 +4309,8 @@ again:
path->slots[1] += 1;
} else {
btrfs_set_header_nritems(right, 0);
- insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1], 1);
+ insert_ptr(trans, fs_info, path, &disk_key,
+ right->start, path->slots[1], 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -4319,7 +4326,7 @@ again:
return ret;
}
- copy_for_split(trans, root, path, l, right, slot, mid, nritems);
+ copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
if (split == 2) {
BUG_ON(num_doubles != 0);
@@ -4332,7 +4339,7 @@ again:
push_for_double:
push_for_double_split(trans, root, path, data_size);
tried_avoid_double = 1;
- if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+ if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
return 0;
goto again;
}
@@ -4341,6 +4348,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int ins_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
@@ -4354,7 +4362,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
key.type != BTRFS_EXTENT_CSUM_KEY);
- if (btrfs_leaf_free_space(root, leaf) >= ins_len)
+ if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
return 0;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -4381,7 +4389,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
goto err;
/* the leaf has changed, it now has room. return now */
- if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
+ if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
goto err;
if (key.type == BTRFS_EXTENT_DATA_KEY) {
@@ -4405,7 +4413,7 @@ err:
}
static noinline int split_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *new_key,
unsigned long split_offset)
@@ -4421,7 +4429,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
leaf = path->nodes[0];
- BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
+ BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
btrfs_set_path_blocking(path);
@@ -4470,7 +4478,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
item_size - split_offset);
btrfs_mark_buffer_dirty(leaf);
- BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
+ BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
kfree(buf);
return 0;
}
@@ -4502,7 +4510,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = split_item(trans, root, path, new_key, split_offset);
+ ret = split_item(trans, root->fs_info, path, new_key, split_offset);
return ret;
}
@@ -4548,8 +4556,8 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
-void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
- u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
@@ -4572,7 +4580,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
return;
nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
+ data_end = leaf_data_end(fs_info, leaf);
old_data_start = btrfs_item_offset_nr(leaf, slot);
@@ -4631,15 +4639,15 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(root->fs_info, path, &disk_key, 1);
+ fixup_low_keys(fs_info, path, &disk_key, 1);
}
item = btrfs_item_nr(slot);
btrfs_set_item_size(leaf, item, new_size);
btrfs_mark_buffer_dirty(leaf);
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
@@ -4647,7 +4655,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
/*
* make the item pointed to by the path bigger, data_size is the added size.
*/
-void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size)
{
int slot;
@@ -4665,10 +4673,10 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
+ data_end = leaf_data_end(fs_info, leaf);
- if (btrfs_leaf_free_space(root, leaf) < data_size) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
slot = path->slots[0];
@@ -4676,9 +4684,9 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
BUG_ON(slot < 0);
if (slot >= nritems) {
- btrfs_print_leaf(root, leaf);
- btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
- slot, nritems);
+ btrfs_print_leaf(fs_info, leaf);
+ btrfs_crit(fs_info, "slot %d too large, nritems %d",
+ slot, nritems);
BUG_ON(1);
}
@@ -4706,8 +4714,8 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_item_size(leaf, item, old_size + data_size);
btrfs_mark_buffer_dirty(leaf);
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
@@ -4721,6 +4729,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
u32 total_data, u32 total_size, int nr)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_item *item;
int i;
u32 nritems;
@@ -4732,7 +4741,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(root->fs_info, path, &disk_key, 1);
+ fixup_low_keys(fs_info, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@@ -4742,13 +4751,12 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
slot = path->slots[0];
nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
+ data_end = leaf_data_end(fs_info, leaf);
- if (btrfs_leaf_free_space(root, leaf) < total_size) {
- btrfs_print_leaf(root, leaf);
- btrfs_crit(root->fs_info,
- "not enough freespace need %u have %d",
- total_size, btrfs_leaf_free_space(root, leaf));
+ if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
+ btrfs_print_leaf(fs_info, leaf);
+ btrfs_crit(fs_info, "not enough freespace need %u have %d",
+ total_size, btrfs_leaf_free_space(fs_info, leaf));
BUG();
}
@@ -4756,9 +4764,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
unsigned int old_data = btrfs_item_end_nr(leaf, slot);
if (old_data < data_end) {
- btrfs_print_leaf(root, leaf);
- btrfs_crit(root->fs_info,
- "slot %d old_data %d data_end %d",
+ btrfs_print_leaf(fs_info, leaf);
+ btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
slot, old_data, data_end);
BUG_ON(1);
}
@@ -4800,8 +4807,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_header_nritems(leaf, nritems + nr);
btrfs_mark_buffer_dirty(leaf);
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
+ if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+ btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
@@ -4876,6 +4883,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
int level, int slot)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
int ret;
@@ -4883,7 +4891,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
nritems = btrfs_header_nritems(parent);
if (slot != nritems - 1) {
if (level)
- tree_mod_log_eb_move(root->fs_info, parent, slot,
+ tree_mod_log_eb_move(fs_info, parent, slot,
slot + 1, nritems - slot - 1);
memmove_extent_buffer(parent,
btrfs_node_key_ptr_offset(slot),
@@ -4891,7 +4899,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
sizeof(struct btrfs_key_ptr) *
(nritems - slot - 1));
} else if (level) {
- ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
+ ret = tree_mod_log_insert_key(fs_info, parent, slot,
MOD_LOG_KEY_REMOVE, GFP_NOFS);
BUG_ON(ret < 0);
}
@@ -4906,7 +4914,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
+ fixup_low_keys(fs_info, path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
}
@@ -4948,6 +4956,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_item *item;
u32 last_off;
@@ -4969,7 +4978,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
nritems = btrfs_header_nritems(leaf);
if (slot + nr != nritems) {
- int data_end = leaf_data_end(root, leaf);
+ int data_end = leaf_data_end(fs_info, leaf);
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end + dsize,
@@ -4999,7 +5008,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_level(leaf, 0);
} else {
btrfs_set_path_blocking(path);
- clean_tree_block(trans, root->fs_info, leaf);
+ clean_tree_block(trans, fs_info, leaf);
btrfs_del_leaf(trans, root, path, leaf);
}
} else {
@@ -5008,11 +5017,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(root->fs_info, path, &disk_key, 1);
+ fixup_low_keys(fs_info, path, &disk_key, 1);
}
/* delete the leaf if it is mostly empty */
- if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
+ if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
/* push_leaf_left fixes the path.
* make sure the path still points to our leaf
* for possible call to del_ptr below
@@ -5132,6 +5141,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_path *path,
u64 min_trans)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cur;
struct btrfs_key found_key;
int slot;
@@ -5208,7 +5218,7 @@ find_next_key:
goto out;
}
btrfs_set_path_blocking(path);
- cur = read_node_slot(root, cur, slot);
+ cur = read_node_slot(fs_info, cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
goto out;
@@ -5231,14 +5241,14 @@ out:
return ret;
}
-static int tree_move_down(struct btrfs_root *root,
+static int tree_move_down(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level)
{
struct extent_buffer *eb;
BUG_ON(*level == 0);
- eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
+ eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
if (IS_ERR(eb))
return PTR_ERR(eb);
@@ -5248,7 +5258,7 @@ static int tree_move_down(struct btrfs_root *root,
return 0;
}
-static int tree_move_next_or_upnext(struct btrfs_root *root,
+static int tree_move_next_or_upnext(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level)
{
@@ -5279,7 +5289,7 @@ static int tree_move_next_or_upnext(struct btrfs_root *root,
* Returns 1 if it had to move up and next. 0 is returned if it moved only next
* or down.
*/
-static int tree_advance(struct btrfs_root *root,
+static int tree_advance(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level,
int allow_down,
@@ -5288,9 +5298,10 @@ static int tree_advance(struct btrfs_root *root,
int ret;
if (*level == 0 || !allow_down) {
- ret = tree_move_next_or_upnext(root, path, level, root_level);
+ ret = tree_move_next_or_upnext(fs_info, path, level,
+ root_level);
} else {
- ret = tree_move_down(root, path, level, root_level);
+ ret = tree_move_down(fs_info, path, level, root_level);
}
if (ret >= 0) {
if (*level == 0)
@@ -5303,8 +5314,7 @@ static int tree_advance(struct btrfs_root *root,
return ret;
}
-static int tree_compare_item(struct btrfs_root *left_root,
- struct btrfs_path *left_path,
+static int tree_compare_item(struct btrfs_path *left_path,
struct btrfs_path *right_path,
char *tmp_buf)
{
@@ -5349,6 +5359,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
struct btrfs_root *right_root,
btrfs_changed_cb_t changed_cb, void *ctx)
{
+ struct btrfs_fs_info *fs_info = left_root->fs_info;
int ret;
int cmp;
struct btrfs_path *left_path = NULL;
@@ -5380,9 +5391,9 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
+ tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
if (!tmp_buf) {
- tmp_buf = vmalloc(left_root->nodesize);
+ tmp_buf = vmalloc(fs_info->nodesize);
if (!tmp_buf) {
ret = -ENOMEM;
goto out;
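
The buffer sized by fs_info->nodesize above uses the two-step allocation idiom of the era: attempt a physically contiguous kmalloc() first, with __GFP_NOWARN since nodesize may be as large as 64KiB and higher-order pages can be unavailable, then fall back to vmalloc(). A sketch of the idiom (error handling condensed; the matching release must cope with either allocator, which kvfree() does):

buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);	/* fast path */
if (!buf)
	buf = vmalloc(size);	/* large or fragmented case */
if (!buf)
	return -ENOMEM;
/* ... use buf ... */
kvfree(buf);	/* frees a buffer from either allocator */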
@@ -5430,7 +5441,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
* the right if possible or go up and right.
*/
- down_read(&left_root->fs_info->commit_root_sem);
+ down_read(&fs_info->commit_root_sem);
left_level = btrfs_header_level(left_root->commit_root);
left_root_level = left_level;
left_path->nodes[left_level] = left_root->commit_root;
@@ -5440,7 +5451,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
right_root_level = right_level;
right_path->nodes[right_level] = right_root->commit_root;
extent_buffer_get(right_path->nodes[right_level]);
- up_read(&left_root->fs_info->commit_root_sem);
+ up_read(&fs_info->commit_root_sem);
if (left_level == 0)
btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -5460,7 +5471,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
while (1) {
if (advance_left && !left_end_reached) {
- ret = tree_advance(left_root, left_path, &left_level,
+ ret = tree_advance(fs_info, left_path, &left_level,
left_root_level,
advance_left != ADVANCE_ONLY_NEXT,
&left_key);
@@ -5471,7 +5482,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
advance_left = 0;
}
if (advance_right && !right_end_reached) {
- ret = tree_advance(right_root, right_path, &right_level,
+ ret = tree_advance(fs_info, right_path, &right_level,
right_root_level,
advance_right != ADVANCE_ONLY_NEXT,
&right_key);
@@ -5535,8 +5546,8 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
enum btrfs_compare_tree_result result;
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
- ret = tree_compare_item(left_root, left_path,
- right_path, tmp_buf);
+ ret = tree_compare_item(left_path, right_path,
+ tmp_buf);
if (ret)
result = BTRFS_COMPARE_TREE_CHANGED;
else
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0b8ce2b9f7d0..50bcfb80d33a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -90,9 +90,6 @@ static const int btrfs_csum_sizes[] = { 4 };
/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0
-/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
-#define REQ_GET_READ_MIRRORS (1 << 30)
-
/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
@@ -340,7 +337,7 @@ struct btrfs_path {
unsigned int need_commit_sem:1;
unsigned int skip_release_on_error:1;
};
-#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \
+#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
struct btrfs_dev_replace {
u64 replace_state; /* see #define above */
@@ -429,6 +426,10 @@ struct btrfs_space_info {
struct list_head ro_bgs;
struct list_head priority_tickets;
struct list_head tickets;
+ /*
+ * tickets_id just indicates the next ticket that will be handled, so
+ * note it's not stored per ticket.
+ */
u64 tickets_id;
struct rw_semaphore groups_sem;
@@ -518,7 +519,7 @@ struct btrfs_io_ctl {
void *cur, *orig;
struct page *page;
struct page **pages;
- struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
struct inode *inode;
unsigned long size;
int index;
@@ -798,7 +799,6 @@ struct btrfs_fs_info {
spinlock_t super_lock;
struct btrfs_super_block *super_copy;
struct btrfs_super_block *super_for_commit;
- struct block_device *__bdev;
struct super_block *sb;
struct inode *btree_inode;
struct backing_dev_info bdi;
@@ -1084,8 +1084,18 @@ struct btrfs_fs_info {
/* Used to record internally whether fs has been frozen */
int fs_frozen;
+
+ /* Cached block sizes */
+ u32 nodesize;
+ u32 sectorsize;
+ u32 stripesize;
};
+static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
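
btrfs_sb() moves up in the header so the new inline helpers can use it; it is the standard hop from a VFS super_block to the btrfs_fs_info, as btrfs_inode_sectorsize() below demonstrates. Typical use from an inode (illustrative):

struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);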
struct btrfs_subvolume_writers {
struct percpu_counter counter;
wait_queue_head_t wait;
@@ -1159,14 +1169,6 @@ struct btrfs_root {
u64 objectid;
u64 last_trans;
- /* data allocations are done in sectorsize units */
- u32 sectorsize;
-
- /* node allocations are done in nodesize units */
- u32 nodesize;
-
- u32 stripesize;
-
u32 type;
u64 highest_objectid;
@@ -1250,38 +1252,42 @@ struct btrfs_root {
/* For qgroup metadata space reserve */
atomic_t qgroup_meta_rsv;
};
+static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
+{
+ return btrfs_sb(inode->i_sb)->sectorsize;
+}
static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
{
return blocksize - sizeof(struct btrfs_header);
}
-static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
- return __BTRFS_LEAF_DATA_SIZE(root->nodesize);
+ return __BTRFS_LEAF_DATA_SIZE(info->nodesize);
}
-static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
- return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+ return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}
-static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root)
+static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
- return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr);
+ return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}
#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
(offsetof(struct btrfs_file_extent_item, disk_bytenr))
-static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
{
- return BTRFS_MAX_ITEM_SIZE(root) -
+ return BTRFS_MAX_ITEM_SIZE(info) -
BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
-static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
- return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item);
+ return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
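
With the block sizes now cached in fs_info, all of these limits derive from one number. A worked example for the default 16KiB nodesize, using the usual on-disk struct sizes (101-byte btrfs_header, 25-byte btrfs_item, 33-byte btrfs_key_ptr; treat the arithmetic as illustrative):

/*
 * BTRFS_LEAF_DATA_SIZE     = 16384 - 101 = 16283
 * BTRFS_MAX_ITEM_SIZE      = 16283 - 25  = 16258
 * BTRFS_NODEPTRS_PER_BLOCK = 16283 / 33  = 493   (internal-node fanout)
 */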
/*
@@ -1343,12 +1349,13 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
#ifdef CONFIG_BTRFS_DEBUG
static inline int
-btrfs_should_fragment_free_space(struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group)
+btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
{
- return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) &&
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+
+ return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
- (btrfs_test_opt(root->fs_info, FRAGMENT_DATA) &&
+ (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif
@@ -2210,6 +2217,8 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
cpu->target = le64_to_cpu(disk->target);
cpu->flags = le64_to_cpu(disk->flags);
cpu->limit = le64_to_cpu(disk->limit);
+ cpu->stripes_min = le32_to_cpu(disk->stripes_min);
+ cpu->stripes_max = le32_to_cpu(disk->stripes_max);
}
static inline void
@@ -2228,6 +2237,8 @@ btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
disk->target = cpu_to_le64(cpu->target);
disk->flags = cpu_to_le64(cpu->flags);
disk->limit = cpu_to_le64(cpu->limit);
+ disk->stripes_min = cpu_to_le32(cpu->stripes_min);
+ disk->stripes_max = cpu_to_le32(cpu->stripes_max);
}
/* struct btrfs_super_block */
@@ -2299,13 +2310,13 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
* this returns the address of the start of the last item,
* which is the end of the leaf data stack
*/
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
+static inline unsigned int leaf_data_end(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
u32 nr = btrfs_header_nritems(leaf);
if (nr == 0)
- return BTRFS_LEAF_DATA_SIZE(root);
+ return BTRFS_LEAF_DATA_SIZE(fs_info);
return btrfs_item_offset_nr(leaf, nr - 1);
}
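
leaf_data_end() encodes the leaf layout: item headers grow up from the front of the data area while item payloads grow down from the end, so the offset of the last item is where the data stack currently stops, or the full data size in an empty leaf. Schematically (illustrative):

/*
 * [ hdr | item 0 | item 1 | ...free... | payload 1 | payload 0 ]
 *        headers grow ->                  <- payloads grow
 * data_end = btrfs_item_offset_nr(leaf, nritems - 1)
 */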
@@ -2501,11 +2512,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left,
BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
struct btrfs_dev_replace_item, cursor_right, 64);
-static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
/* helper function to cast into the data area of the leaf. */
#define btrfs_item_ptr(leaf, slot, type) \
((type *)(btrfs_leaf_data(leaf) + \
@@ -2528,28 +2534,28 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
/* extent-tree.c */
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes);
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
-static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
+static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
- return root->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+ return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
/*
* Doing a truncate won't result in new nodes or leaves, just what we need for
* COW.
*/
-static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
+static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
- return root->nodesize * BTRFS_MAX_LEVEL * num_items;
+ return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
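
The reservation formulas are worst-case: a COW may dirty one node per tree level (BTRFS_MAX_LEVEL is 8), and the transaction variant doubles that to allow for new nodes and splits; the truncate variant does not, per the comment above. A worked example for a 16KiB nodesize (illustrative):

/*
 * btrfs_calc_trans_metadata_size(fs_info, 1) = 16384 * 8 * 2 = 256KiB
 * btrfs_calc_trunc_metadata_size(fs_info, 1) = 16384 * 8     = 128KiB
 */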
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2558,18 +2564,18 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, unsigned long count);
-int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info, unsigned long count);
+int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
-int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
+ struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags);
-int btrfs_pin_extent(struct btrfs_root *root,
+int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes);
-int btrfs_exclude_logged_extents(struct btrfs_root *root,
+int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -2590,12 +2596,11 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
u64 root_objectid, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@@ -2606,52 +2611,52 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset);
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
- int delalloc);
-int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 len, int delalloc);
+int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len);
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
+ struct btrfs_fs_info *fs_info);
+int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
-int btrfs_read_block_groups(struct btrfs_root *root);
-int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
+int btrfs_read_block_groups(struct btrfs_fs_info *info);
+int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytes_used,
+ struct btrfs_fs_info *fs_info, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 group_start,
+ struct btrfs_fs_info *fs_info, u64 group_start,
struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
@@ -2681,7 +2686,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
u64 len);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode);
@@ -2690,7 +2695,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems,
u64 *qgroup_reserved, bool use_global_rsv);
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
@@ -2698,16 +2703,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
-void btrfs_free_block_rsv(struct btrfs_root *root,
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_check(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, int min_factor);
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
enum btrfs_reserve_flush_enum flush);
@@ -2717,22 +2721,21 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
-void btrfs_block_rsv_release(struct btrfs_root *root,
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_inc_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
-void btrfs_dec_block_group_ro(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache);
+void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
-int btrfs_error_unpin_extent_range(struct btrfs_root *root,
+int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
-int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 type);
-int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
+ struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
@@ -2742,8 +2745,7 @@ int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const u64 type);
+ struct btrfs_fs_info *fs_info, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_fs_info *info, u64 start, u64 end);
@@ -2793,10 +2795,10 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf);
-void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size);
-void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
- u32 new_size, int from_end);
+void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -2872,7 +2874,8 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
return btrfs_next_old_item(root, p, 0);
}
-int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
+int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
int update_ref, int for_reloc);
@@ -2898,10 +2901,9 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
* anything except sleeping. This function is used to check the status of
* the fs.
*/
-static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
+static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
- return (root->fs_info->sb->s_flags & MS_RDONLY ||
- btrfs_fs_closing(root->fs_info));
+ return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
}
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -2931,11 +2933,11 @@ int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *tree_root,
+ struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
const char *name, int name_len);
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *tree_root,
+ struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
const char *name, int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2950,7 +2952,7 @@ int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
struct btrfs_path *path, struct btrfs_root_item *root_item,
struct btrfs_key *root_key);
-int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
+int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
void btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
@@ -2959,10 +2961,10 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
/* uuid-tree.c */
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
- struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+ struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid);
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
- struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+ struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
@@ -3004,10 +3006,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
-int verify_dir_item(struct btrfs_root *root,
+int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name,
int name_len);
@@ -3051,11 +3053,10 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
/* file-item.c */
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr, u64 len);
-int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u32 *dst);
-int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 logical_offset);
+ struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
+int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+ u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
@@ -3069,8 +3070,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
-int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 file_start, int contig);
+int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+ u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
@@ -3173,7 +3174,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
-void btrfs_run_delayed_iputs(struct btrfs_root *root);
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
@@ -3227,9 +3228,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+ size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
@@ -3252,7 +3252,7 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* super.c */
-int btrfs_parse_options(struct btrfs_root *root, char *options,
+int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
@@ -3445,9 +3445,14 @@ do { \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
- WARN(1, KERN_DEBUG \
- "BTRFS: Transaction aborted (error %d)\n", \
- (errno)); \
+ if ((errno) != -EIO) { \
+ WARN(1, KERN_DEBUG \
+ "BTRFS: Transaction aborted (error %d)\n", \
+ (errno)); \
+ } else { \
+ pr_debug("BTRFS: Transaction aborted (error %d)\n", \
+ (errno)); \
+ } \
} \
__btrfs_abort_transaction((trans), __func__, \
__LINE__, (errno)); \
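The macro change above reports only the first abort per mount and demotes -EIO, which is expected when the underlying device is failing, from a WARN to a debug-level message. A small userspace model of that reporting logic, with plain C standing in for the kernel's test_and_set_bit()/WARN() machinery:

/* Model of the abort reporting above: warn once per mount, but log
 * -EIO aborts at debug level instead of triggering a WARN. */
#include <errno.h>
#include <stdio.h>

static int abort_reported;      /* models BTRFS_FS_STATE_TRANS_ABORTED */

static void report_abort(int err)
{
        if (abort_reported)
                return;         /* report only the first abort */
        abort_reported = 1;
        if (err != -EIO)
                fprintf(stderr, "WARN: Transaction aborted (error %d)\n", err);
        else
                fprintf(stderr, "debug: Transaction aborted (error %d)\n", err);
}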
@@ -3609,7 +3614,7 @@ static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
#endif
/* relocation.c */
-int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
@@ -3628,12 +3633,12 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
int readonly, int is_dev_replace);
-void btrfs_scrub_pause(struct btrfs_root *root);
-void btrfs_scrub_continue(struct btrfs_root *root);
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
struct btrfs_device *dev);
-int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress);
/* dev-replace.c */
@@ -3648,7 +3653,7 @@ static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
/* reada.c */
struct reada_control {
- struct btrfs_root *root; /* tree to prefetch */
+ struct btrfs_fs_info *fs_info; /* tree to prefetch */
struct btrfs_key key_start;
struct btrfs_key key_end; /* exclusive */
atomic_t elems;
@@ -3660,7 +3665,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb, u64 start, int err);
+ struct extent_buffer *eb, int err);
static inline int is_fstree(u64 rootid)
{
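All of the ctree.h prototype changes above follow the same conversion: helpers that only used their btrfs_root argument to reach root->fs_info now take the fs_info directly. A minimal sketch of the calling-convention change, using hypothetical stand-in types rather than the real structs:

/* Stand-in types modeling the root -> fs_info relationship. */
struct fs_info_x { int flags; };
struct root_x { struct fs_info_x *fs_info; };

/* Old shape: the root is only dereferenced for its fs_info. */
static int helper_old(struct root_x *root)
{
        return root->fs_info->flags;
}

/* New shape: callers that already hold an fs_info pass it directly. */
static int helper_new(struct fs_info_x *fs_info)
{
        return fs_info->flags;
}

Call sites change from helper_old(root) to helper_new(root->fs_info), or drop the dereference entirely where an fs_info is already in scope.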
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 0fcf5f25d524..80982a83c9fd 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -72,12 +72,6 @@ static inline int btrfs_is_continuous_delayed_item(
return 0;
}
-static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
- struct btrfs_root *root)
-{
- return root->fs_info->delayed_root;
-}
-
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
@@ -535,7 +529,7 @@ static struct btrfs_delayed_item *__btrfs_next_delayed_item(
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
struct btrfs_block_rsv *src_rsv;
@@ -547,12 +541,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
return 0;
src_rsv = trans->block_rsv;
- dst_rsv = &root->fs_info->delayed_block_rsv;
+ dst_rsv = &fs_info->delayed_block_rsv;
- num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
if (!ret) {
- trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+ trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid,
num_bytes, 1);
item->bytes_reserved = num_bytes;
@@ -561,7 +555,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
return ret;
}
-static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
struct btrfs_block_rsv *rsv;
@@ -569,11 +563,11 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
if (!item->bytes_reserved)
return;
- rsv = &root->fs_info->delayed_block_rsv;
- trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+ rsv = &fs_info->delayed_block_rsv;
+ trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, item->bytes_reserved,
0);
- btrfs_block_rsv_release(root, rsv,
+ btrfs_block_rsv_release(fs_info, rsv,
item->bytes_reserved);
}
@@ -583,6 +577,7 @@ static int btrfs_delayed_inode_reserve_metadata(
struct inode *inode,
struct btrfs_delayed_node *node)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv;
u64 num_bytes;
@@ -590,9 +585,9 @@ static int btrfs_delayed_inode_reserve_metadata(
bool release = false;
src_rsv = trans->block_rsv;
- dst_rsv = &root->fs_info->delayed_block_rsv;
+ dst_rsv = &fs_info->delayed_block_rsv;
- num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
/*
* If our block_rsv is the delalloc block reserve then check and see if
@@ -640,7 +635,7 @@ static int btrfs_delayed_inode_reserve_metadata(
ret = -ENOSPC;
if (!ret) {
node->bytes_reserved = num_bytes;
- trace_btrfs_space_reservation(root->fs_info,
+ trace_btrfs_space_reservation(fs_info,
"delayed_inode",
btrfs_ino(inode),
num_bytes, 1);
@@ -664,21 +659,21 @@ static int btrfs_delayed_inode_reserve_metadata(
* how block rsvs work.

*/
if (!ret) {
- trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+ trace_btrfs_space_reservation(fs_info, "delayed_inode",
btrfs_ino(inode), num_bytes, 1);
node->bytes_reserved = num_bytes;
}
if (release) {
- trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 0);
- btrfs_block_rsv_release(root, src_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
}
return ret;
}
-static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node)
{
struct btrfs_block_rsv *rsv;
@@ -686,10 +681,10 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
if (!node->bytes_reserved)
return;
- rsv = &root->fs_info->delayed_block_rsv;
- trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+ rsv = &fs_info->delayed_block_rsv;
+ trace_btrfs_space_reservation(fs_info, "delayed_inode",
node->inode_id, node->bytes_reserved, 0);
- btrfs_block_rsv_release(root, rsv,
+ btrfs_block_rsv_release(fs_info, rsv,
node->bytes_reserved);
node->bytes_reserved = 0;
}
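The reserve and release helpers above stay symmetric across the fs_info conversion: reserve migrates a computed byte count into the shared delayed_block_rsv and records it on the item or node, and release hands back exactly the recorded amount. A simplified model of that accounting, with stand-in types:

/* Sketch of the reserve/release symmetry used by the delayed code:
 * reserve records how many bytes were migrated into the shared rsv,
 * release returns exactly that amount and clears the record. */
struct rsv_x { unsigned long long reserved; };

struct delayed_node_x {
        struct rsv_x *dst;              /* models delayed_block_rsv */
        unsigned long long bytes_reserved;
};

static void reserve_metadata(struct delayed_node_x *n,
                             unsigned long long num_bytes)
{
        n->dst->reserved += num_bytes;  /* migrate into the shared rsv */
        n->bytes_reserved = num_bytes;  /* remember for release */
}

static void release_metadata(struct delayed_node_x *n)
{
        if (!n->bytes_reserved)
                return;                 /* nothing was reserved */
        n->dst->reserved -= n->bytes_reserved;
        n->bytes_reserved = 0;
}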
@@ -702,6 +697,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
int free_space;
int total_data_size = 0, total_size = 0;
@@ -718,7 +714,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
BUG_ON(!path->nodes[0]);
leaf = path->nodes[0];
- free_space = btrfs_leaf_free_space(root, leaf);
+ free_space = btrfs_leaf_free_space(fs_info, leaf);
INIT_LIST_HEAD(&head);
next = item;
@@ -791,7 +787,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
curr->data_len);
slot++;
- btrfs_delayed_item_release_metadata(root, curr);
+ btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
@@ -813,6 +809,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *delayed_item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
char *ptr;
int ret;
@@ -830,7 +827,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
delayed_item->data_len);
btrfs_mark_buffer_dirty(leaf);
- btrfs_delayed_item_release_metadata(root, delayed_item);
+ btrfs_delayed_item_release_metadata(fs_info, delayed_item);
return 0;
}
@@ -882,6 +879,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -931,7 +929,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
goto out;
list_for_each_entry_safe(curr, next, &head, tree_list) {
- btrfs_delayed_item_release_metadata(root, curr);
+ btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
@@ -1017,6 +1015,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_node *node)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
@@ -1073,7 +1072,7 @@ out:
no_iref:
btrfs_release_path(path);
err_out:
- btrfs_delayed_inode_release_metadata(root, node);
+ btrfs_delayed_inode_release_metadata(fs_info, node);
btrfs_release_delayed_inode(node);
return ret;
@@ -1138,7 +1137,7 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
* outstanding delayed items cleaned up.
*/
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int nr)
+ struct btrfs_fs_info *fs_info, int nr)
{
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
@@ -1156,9 +1155,9 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
- trans->block_rsv = &root->fs_info->delayed_block_rsv;
+ trans->block_rsv = &fs_info->delayed_block_rsv;
- delayed_root = btrfs_get_delayed_root(root);
+ delayed_root = fs_info->delayed_root;
curr_node = btrfs_first_delayed_node(delayed_root);
while (curr_node && (!count || (count && nr--))) {
@@ -1185,15 +1184,15 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- return __btrfs_run_delayed_items(trans, root, -1);
+ return __btrfs_run_delayed_items(trans, fs_info, -1);
}
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int nr)
+ struct btrfs_fs_info *fs_info, int nr)
{
- return __btrfs_run_delayed_items(trans, root, nr);
+ return __btrfs_run_delayed_items(trans, fs_info, nr);
}
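The two wrappers above differ only in the budget they pass: -1 asks __btrfs_run_delayed_items() to drain every delayed node, while a positive nr stops after that many. A toy version of the loop bound, assuming count is derived from nr as in the loop shown earlier:

/* run_items(-1) drains everything; run_items(n) stops after n nodes. */
static int run_items(int nr)
{
        int count = (nr >= 0);          /* assumed derivation of 'count' */
        int queued = 8;                 /* stand-in for the node list */
        int done = 0;

        while (queued && (!count || nr--)) {
                queued--;
                done++;
        }
        return done;
}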
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
@@ -1236,6 +1235,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
@@ -1267,7 +1267,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
- trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+ trans->block_rsv = &fs_info->delayed_block_rsv;
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
@@ -1280,8 +1280,8 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
btrfs_free_path(path);
trans->block_rsv = block_rsv;
trans_out:
- btrfs_end_transaction(trans, delayed_node->root);
- btrfs_btree_balance_dirty(delayed_node->root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
out:
btrfs_release_delayed_node(delayed_node);
@@ -1345,15 +1345,16 @@ again:
__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
trans->block_rsv = block_rsv;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty_nodelay(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty_nodelay(root->fs_info);
release_path:
btrfs_release_path(path);
total_done++;
btrfs_release_prepared_delayed_node(delayed_node);
- if (async_work->nr == 0 || total_done < async_work->nr)
+ if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
+ total_done < async_work->nr)
goto again;
free_path:
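The changed termination test above bounds how long one async worker runs: a worker queued with nr == 0 used to loop until the delayed tree drained completely, and now stops after BTRFS_DELAYED_WRITEBACK nodes. A sketch of the new predicate; the constant's value here is an assumption:

#define DELAYED_WRITEBACK_X 512         /* assumed stand-in value */

/* Keep going while under the explicit budget, or, for "unlimited"
 * workers (nr == 0), under the global per-run cap. */
static int keep_going(int nr, int total_done)
{
        return (nr == 0 && total_done < DELAYED_WRITEBACK_X) ||
               total_done < nr;
}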
@@ -1369,7 +1370,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
{
struct btrfs_async_delayed_work *async_work;
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
+ btrfs_workqueue_normal_congested(fs_info->delayed_workers))
return 0;
async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
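The extra condition above stops new async work from being queued while the delayed workqueue is already congested, so the kmalloc() is never reached in that case. The gate, reduced to its shape with illustrative thresholds:

/* Queue more background work only when there is enough backlog and
 * the workqueue can absorb it. */
static int should_queue_async(int items, int congested)
{
        if (items < 128 /* BTRFS_DELAYED_BACKGROUND stand-in */)
                return 0;
        if (congested)
                return 0;
        return 1;
}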
@@ -1385,11 +1387,9 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return 0;
}
-void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
+void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
- struct btrfs_delayed_root *delayed_root;
- delayed_root = btrfs_get_delayed_root(root);
- WARN_ON(btrfs_first_delayed_node(delayed_root));
+ WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
@@ -1405,12 +1405,9 @@ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
return 0;
}
-void btrfs_balance_delayed_items(struct btrfs_root *root)
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
- struct btrfs_delayed_root *delayed_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
-
- delayed_root = btrfs_get_delayed_root(root);
+ struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
return;
@@ -1435,8 +1432,9 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- int name_len, struct inode *dir,
+ struct btrfs_fs_info *fs_info,
+ const char *name, int name_len,
+ struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index)
{
@@ -1467,7 +1465,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
btrfs_set_stack_dir_type(dir_item, type);
memcpy((char *)(dir_item + 1), name, name_len);
- ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+ ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible
@@ -1478,7 +1476,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
@@ -1491,7 +1489,7 @@ release_node:
return ret;
}
-static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node,
struct btrfs_key *key)
{
@@ -1504,15 +1502,15 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
return 1;
}
- btrfs_delayed_item_release_metadata(root, item);
+ btrfs_delayed_item_release_metadata(fs_info, item);
btrfs_release_delayed_item(item);
mutex_unlock(&node->mutex);
return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *dir,
- u64 index)
+ struct btrfs_fs_info *fs_info,
+ struct inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
struct btrfs_delayed_item *item;
@@ -1527,7 +1525,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
- ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+ ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
if (!ret)
goto end;
@@ -1539,7 +1537,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item->key = item_key;
- ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+ ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
@@ -1549,7 +1547,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
@@ -1686,7 +1684,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
*
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list, bool *emitted)
+ struct list_head *ins_list)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
@@ -1730,7 +1728,6 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
if (over)
return 1;
- *emitted = true;
}
return 0;
}
@@ -1861,6 +1858,7 @@ release_node:
int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_delayed_node *delayed_node;
/*
@@ -1868,8 +1866,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
* leads to enospc problems. This means we also can't do
* delayed inode refs
*/
- if (test_bit(BTRFS_FS_LOG_RECOVERING,
- &BTRFS_I(inode)->root->fs_info->flags))
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return -EAGAIN;
delayed_node = btrfs_get_or_create_delayed_node(inode);
@@ -1896,7 +1893,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
delayed_node->count++;
- atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
+ atomic_inc(&fs_info->delayed_root->items);
release_node:
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node);
@@ -1906,12 +1903,13 @@ release_node:
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
struct btrfs_root *root = delayed_node->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr_item, *prev_item;
mutex_lock(&delayed_node->mutex);
curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
while (curr_item) {
- btrfs_delayed_item_release_metadata(root, curr_item);
+ btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
@@ -1919,7 +1917,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
while (curr_item) {
- btrfs_delayed_item_release_metadata(root, curr_item);
+ btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
@@ -1929,7 +1927,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
btrfs_release_delayed_iref(delayed_node);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
- btrfs_delayed_inode_release_metadata(root, delayed_node);
+ btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
btrfs_release_delayed_inode(delayed_node);
}
mutex_unlock(&delayed_node->mutex);
@@ -1976,14 +1974,11 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
}
}
-void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
+void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
- struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
- delayed_root = btrfs_get_delayed_root(root);
-
- curr_node = btrfs_first_delayed_node(delayed_root);
+ curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
while (curr_node) {
__btrfs_kill_delayed_node(curr_node);
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 2495b3d4075f..8a2bf5e3e4cf 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -99,23 +99,24 @@ static inline void btrfs_init_delayed_root(
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- int name_len, struct inode *dir,
+ struct btrfs_fs_info *fs_info,
+ const char *name, int name_len,
+ struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *dir,
- u64 index);
+ struct btrfs_fs_info *fs_info,
+ struct inode *dir, u64 index);
int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int nr);
+ struct btrfs_fs_info *fs_info, int nr);
-void btrfs_balance_delayed_items(struct btrfs_root *root);
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info);
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct inode *inode);
@@ -134,7 +135,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode);
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
/* Used for clean the transaction */
-void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
+void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
/* Used for readdir() */
bool btrfs_readdir_get_delayed_items(struct inode *inode,
@@ -146,13 +147,13 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list, bool *emitted);
+ struct list_head *ins_list);
/* for init */
int __init btrfs_delayed_inode_init(void);
void btrfs_delayed_inode_exit(void);
/* for debugging */
-void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
+void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info);
#endif
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 8d93854a4b4f..ef724a5fc30e 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -189,6 +189,8 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
} else {
assert_spin_locked(&head->lock);
list_del(&ref->list);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
}
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
@@ -431,6 +433,15 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
exist->action = ref->action;
mod = -exist->ref_mod;
exist->ref_mod = ref->ref_mod;
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ list_add_tail(&exist->add_list,
+ &href->ref_add_list);
+ else if (ref->action == BTRFS_DROP_DELAYED_REF) {
+ ASSERT(!list_empty(&exist->add_list));
+ list_del(&exist->add_list);
+ } else {
+ ASSERT(0);
+ }
} else
mod = -ref->ref_mod;
}
@@ -444,6 +455,8 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
add_tail:
list_add_tail(&ref->list, &href->ref_list);
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
trans->delayed_ref_updates++;
spin_unlock(&href->lock);
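The list_add_tail() above is the write half of the new ref_add_list: every ref lives on ref_list, and ADD refs are additionally threaded onto a second list so selection can skip DROP entries without walking the whole ref_list. A minimal intrusive-list model of the idea, with stand-in types rather than the kernel's list_head:

/* Two singly linked lists over the same nodes; only ADD refs appear
 * on the 'adds' list. */
struct ref_x {
        struct ref_x *next_all;         /* models ref_list linkage */
        struct ref_x *next_add;         /* models ref_add_list linkage */
        int action;                     /* 1 = ADD, 0 = DROP */
};

struct head_x {
        struct ref_x *all;
        struct ref_x *adds;
};

static void insert_ref(struct head_x *h, struct ref_x *r)
{
        r->next_all = h->all;
        h->all = r;
        if (r->action == 1) {           /* BTRFS_ADD_DELAYED_REF */
                r->next_add = h->adds;
                h->adds = r;
        }
}

A scan that only wants ADD refs then walks h->adds and never touches the DROP entries.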
@@ -590,6 +603,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
INIT_LIST_HEAD(&head_ref->ref_list);
+ INIT_LIST_HEAD(&head_ref->ref_add_list);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
head_ref->qgroup_reserved = 0;
@@ -606,7 +620,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
- if(btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
+ if(btrfs_qgroup_trace_extent_nolock(fs_info,
delayed_refs, qrecord))
kfree(qrecord);
}
@@ -671,6 +685,8 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
+ INIT_LIST_HEAD(&ref->list);
+ INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_tree_ref(ref);
full_ref->parent = parent;
@@ -726,6 +742,8 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
+ INIT_LIST_HEAD(&ref->list);
+ INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_data_ref(ref);
full_ref->parent = parent;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 43f3629760e9..50947b5a9152 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -34,14 +34,14 @@
* ref_head. Must clean this mess up later.
*/
struct btrfs_delayed_ref_node {
- /*
- * ref_head use rb tree, stored in ref_root->href.
- * indexed by bytenr
- */
- struct rb_node rb_node;
-
/*data/tree ref use list, stored in ref_head->ref_list. */
struct list_head list;
+ /*
+ * If action is BTRFS_ADD_DELAYED_REF, also link this node to
+ * ref_head->ref_add_list, then we do not need to iterate the
+ * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
+ */
+ struct list_head add_list;
/* the starting bytenr of the extent */
u64 bytenr;
@@ -99,6 +99,8 @@ struct btrfs_delayed_ref_head {
spinlock_t lock;
struct list_head ref_list;
+ /* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
+ struct list_head ref_add_list;
struct rb_node href_node;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 05169ef30596..5de280b9ad73 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -142,7 +142,7 @@ no_valid_dev_replace_entry_found:
* missing
*/
if (!dev_replace->srcdev &&
- !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
+ !btrfs_test_opt(fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -151,7 +151,7 @@ no_valid_dev_replace_entry_found:
src_devid);
}
if (!dev_replace->tgtdev &&
- !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
+ !btrfs_test_opt(fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -304,11 +304,11 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
dev_replace->cursor_left_last_write_of_item;
}
-int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src)
{
+ struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret;
struct btrfs_device *tgt_device = NULL;
@@ -316,14 +316,14 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
/* the disk copy procedure reuses the scrub code */
mutex_lock(&fs_info->volume_mutex);
- ret = btrfs_find_device_by_devspec(root, srcdevid,
+ ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
srcdev_name, &src_device);
if (ret) {
mutex_unlock(&fs_info->volume_mutex);
return ret;
}
- ret = btrfs_init_dev_replace_tgtdev(root, tgtdev_name,
+ ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
mutex_unlock(&fs_info->volume_mutex);
if (ret)
@@ -335,7 +335,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
*/
trans = btrfs_attach_transaction(root);
if (!IS_ERR(trans)) {
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
} else if (PTR_ERR(trans) != -ENOENT) {
@@ -387,7 +387,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
if (ret)
btrfs_err(fs_info, "kobj add dev failed %d", ret);
- btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
/* force writing the updated state information to disk */
trans = btrfs_start_transaction(root, 0);
@@ -397,7 +397,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
goto leave;
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
/* the disk copy procedure reuses the scrub code */
@@ -422,7 +422,7 @@ leave:
return ret;
}
-int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
+int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
int ret;
@@ -439,7 +439,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
args->start.tgtdev_name[0] == '\0')
return -EINVAL;
- ret = btrfs_dev_replace_start(root, args->start.tgtdev_name,
+ ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
args->start.srcdev_name,
args->start.cont_reading_from_srcdev_mode);
@@ -501,25 +501,25 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
- ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
+ ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
if (ret) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
}
- btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return PTR_ERR(trans);
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
mutex_lock(&uuid_mutex);
/* keep away write_all_supers() during the finishing procedure */
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- mutex_lock(&root->fs_info->chunk_mutex);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->chunk_mutex);
btrfs_dev_replace_lock(dev_replace, 1);
dev_replace->replace_state =
scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
@@ -535,15 +535,15 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
src_device,
tgt_device);
} else {
- btrfs_err_in_rcu(root->fs_info,
- "btrfs_scrub_dev(%s, %llu, %s) failed %d",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
- src_device->devid,
- rcu_str_deref(tgt_device->name), scrub_ret);
+ btrfs_err_in_rcu(fs_info,
+ "btrfs_scrub_dev(%s, %llu, %s) failed %d",
+ src_device->missing ? "<missing disk>" :
+ rcu_str_deref(src_device->name),
+ src_device->devid,
+ rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_dev_replace_unlock(dev_replace, 1);
- mutex_unlock(&root->fs_info->chunk_mutex);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
@@ -552,12 +552,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
return scrub_ret;
}
- btrfs_info_in_rcu(root->fs_info,
- "dev_replace from %s (devid %llu) to %s finished",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
- src_device->devid,
- rcu_str_deref(tgt_device->name));
+ btrfs_info_in_rcu(fs_info,
+ "dev_replace from %s (devid %llu) to %s finished",
+ src_device->missing ? "<missing disk>" :
+ rcu_str_deref(src_device->name),
+ src_device->devid,
+ rcu_str_deref(tgt_device->name));
tgt_device->is_tgtdev_for_dev_replace = 0;
tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID;
@@ -592,8 +592,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* superblock is scratched out so that it is no longer marked to
* belong to this filesystem.
*/
- mutex_unlock(&root->fs_info->chunk_mutex);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex);
/* replace the sysfs entry */
@@ -603,7 +603,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
/* write back the superblocks */
trans = btrfs_start_transaction(root, 0);
if (!IS_ERR(trans))
- btrfs_commit_transaction(trans, root);
+ btrfs_commit_transaction(trans);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -718,7 +718,7 @@ static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return PTR_ERR(trans);
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index e922b42d91df..54ea12bda15b 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -25,9 +25,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
+int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
-int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src);
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 0dc1a033275e..b039fe0c751a 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -38,6 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
const char *name,
int name_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *ptr;
struct btrfs_item *item;
@@ -46,10 +47,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (ret == -EEXIST) {
struct btrfs_dir_item *di;
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
- btrfs_extend_item(root, path, data_size);
+ btrfs_extend_item(fs_info, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
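insert_with_overflow() above implements the shared-item pattern for directory items: several names can hash to one key, so an -EEXIST from the insert is not fatal. Unless the exact name is already present, the existing item is simply extended. The decision logic, reduced to plain C:

#include <errno.h>

/* Returns 0 when the caller may append the new entry (either into a
 * fresh item or an extended existing one), -EEXIST when the exact
 * name is already stored, and passes through other errors. */
static int insert_or_extend(int insert_ret, int name_present, int *extend)
{
        *extend = 0;
        if (insert_ret == -EEXIST) {
                if (name_present)
                        return -EEXIST;
                *extend = 1;            /* grow the shared item */
                return 0;
        }
        return insert_ret < 0 ? insert_ret : 0;
}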
@@ -79,7 +80,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
u32 data_size;
- BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
+ BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info));
key.objectid = objectid;
key.type = BTRFS_XATTR_ITEM_KEY;
@@ -172,8 +173,9 @@ second_insert:
}
btrfs_release_path(path);
- ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
- &disk_key, type, index);
+ ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
+ name_len, dir, &disk_key, type,
+ index);
out_free:
btrfs_free_path(path);
if (ret)
@@ -210,7 +212,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
if (ret > 0)
return NULL;
- return btrfs_match_dir_item_name(root, path, name, name_len);
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
@@ -246,7 +248,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
}
/* we found an item, look for our name in the item */
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
if (di) {
/* our exact name was found */
ret = -EEXIST;
@@ -261,7 +263,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
leaf = path->nodes[0];
slot = path->slots[0];
if (data_size + btrfs_item_size_nr(leaf, slot) +
- sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) {
+ sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
ret = -EOVERFLOW;
} else {
/* plenty of insertion room */
@@ -301,7 +303,7 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
if (ret > 0)
return ERR_PTR(-ENOENT);
- return btrfs_match_dir_item_name(root, path, name, name_len);
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
struct btrfs_dir_item *
@@ -342,7 +344,8 @@ btrfs_search_dir_index_item(struct btrfs_root *root,
if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
break;
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(root->fs_info, path,
+ name, name_len);
if (di)
return di;
@@ -371,7 +374,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
if (ret > 0)
return NULL;
- return btrfs_match_dir_item_name(root, path, name, name_len);
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
/*
@@ -379,7 +382,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name, int name_len)
{
@@ -392,7 +395,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
- if (verify_dir_item(root, leaf, dir_item))
+ if (verify_dir_item(fs_info, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -442,12 +445,13 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
- btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
+ btrfs_truncate_item(root->fs_info, path,
+ item_len - sub_item_len, 1);
}
return ret;
}
-int verify_dir_item(struct btrfs_root *root,
+int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item)
{
@@ -455,8 +459,7 @@ int verify_dir_item(struct btrfs_root *root,
u8 type = btrfs_dir_type(leaf, dir_item);
if (type >= BTRFS_FT_MAX) {
- btrfs_crit(root->fs_info, "invalid dir item type: %d",
- (int)type);
+ btrfs_crit(fs_info, "invalid dir item type: %d", (int)type);
return 1;
}
@@ -464,16 +467,16 @@ int verify_dir_item(struct btrfs_root *root,
namelen = XATTR_NAME_MAX;
if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
- btrfs_crit(root->fs_info, "invalid dir item name len: %u",
+ btrfs_crit(fs_info, "invalid dir item name len: %u",
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
}
/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
if ((btrfs_dir_data_len(leaf, dir_item) +
- btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
- btrfs_crit(root->fs_info,
- "invalid dir item name + data len: %u + %u",
+ btrfs_dir_name_len(leaf, dir_item)) >
+ BTRFS_MAX_XATTR_SIZE(fs_info)) {
+ btrfs_crit(fs_info, "invalid dir item name + data len: %u + %u",
(unsigned)btrfs_dir_name_len(leaf, dir_item),
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
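verify_dir_item() above keeps its three bounds checks through the fs_info conversion: reject out-of-range types and name/data lengths before trusting any on-disk value. The shape of the validation, with the limits as stand-ins for BTRFS_FT_MAX, XATTR_NAME_MAX and BTRFS_MAX_XATTR_SIZE:

static int dir_item_valid(unsigned type, unsigned name_len,
                          unsigned data_len, unsigned max_xattr)
{
        if (type >= 9 /* assumed BTRFS_FT_MAX */)
                return 0;
        if (name_len > 255 /* assumed XATTR_NAME_MAX */)
                return 0;
        if (name_len + data_len > max_xattr)
                return 0;
        return 1;
}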
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fe10afd51e02..18004169552c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -68,15 +68,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark);
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
-static void btrfs_error_commit_super(struct btrfs_root *root);
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
/*
* btrfs_end_io_wq structs are used to do processing in task context when an IO
@@ -224,6 +224,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
int ret;
@@ -231,8 +232,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em) {
- em->bdev =
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
goto out;
}
@@ -247,7 +247,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
em->len = (u64)-1;
em->block_len = (u64)-1;
em->block_start = 0;
- em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
@@ -271,7 +271,7 @@ u32 btrfs_csum_data(char *data, u32 seed, size_t len)
return btrfs_crc32c(seed, data, len);
}
-void btrfs_csum_final(u32 crc, char *result)
+void btrfs_csum_final(u32 crc, u8 *result)
{
put_unaligned_le32(~crc, result);
}
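The signature change above (char * to u8 *) reflects what the helper actually does: finish a crc32c by inverting it and storing the value little-endian into a raw byte area. A userspace equivalent of the store, with the crc accumulation itself omitted:

#include <stdint.h>

static void csum_final(uint32_t crc, uint8_t *result)
{
        uint32_t v = ~crc;

        /* put_unaligned_le32() equivalent: byte-wise little-endian */
        result[0] = v & 0xff;
        result[1] = (v >> 8) & 0xff;
        result[2] = (v >> 16) & 0xff;
        result[3] = (v >> 24) & 0xff;
}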
@@ -440,7 +440,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
* helper to read a given tree block, doing retries as required when
* the checksums don't match and we have alternate mirrors to try.
*/
-static int btree_read_extent_buffer_pages(struct btrfs_root *root,
+static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb,
u64 parent_transid)
{
@@ -452,7 +452,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
int failed_mirror = 0;
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+ io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
btree_get_extent, mirror_num);
@@ -472,7 +472,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
break;
- num_copies = btrfs_num_copies(root->fs_info,
+ num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len);
if (num_copies == 1)
break;
@@ -491,7 +491,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
}
if (failed && !ret && failed_mirror)
- repair_eb_io_failure(root, eb, failed_mirror);
+ repair_eb_io_failure(fs_info, eb, failed_mirror);
return ret;
}
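btree_read_extent_buffer_pages() above keeps its retry structure through the fs_info conversion: read, verify, and on failure try the next mirror up to btrfs_num_copies(), finally repairing the last failed mirror from a good copy. A rough model of that shape; read_one_mirror() and repair_mirror() are hypothetical stand-ins for the read/verify and repair_eb_io_failure() steps:

static int read_one_mirror(int mirror)
{
        return mirror >= 2 ? 0 : -5;    /* -EIO stand-in; mirror 2 good */
}

static void repair_mirror(int failed) { (void)failed; }

static int read_with_retries(int num_copies)
{
        int mirror, failed = 0, ret = -1;

        for (mirror = 1; mirror <= num_copies; mirror++) {
                ret = read_one_mirror(mirror);
                if (ret == 0)
                        break;
                failed = mirror;
        }
        if (ret == 0 && failed)
                repair_mirror(failed);  /* rewrite the bad copy */
        return ret;
}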
@@ -545,47 +545,63 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
return ret;
}
-#define CORRUPT(reason, eb, root, slot) \
- btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \
- " root=%llu, slot=%d", \
- btrfs_header_level(eb) == 0 ? "leaf" : "node",\
+#define CORRUPT(reason, eb, root, slot) \
+ btrfs_crit(root->fs_info, \
+ "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", \
reason, btrfs_header_bytenr(eb), root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
struct extent_buffer *leaf)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key leaf_key;
u32 nritems = btrfs_header_nritems(leaf);
int slot;
- if (nritems == 0) {
+ /*
+ * Extent buffers from a relocation tree have an owner field that
+ * corresponds to the subvolume tree they are based on. So just from an
+ * extent buffer alone we cannot find out what the id of the
+ * corresponding subvolume tree is, so we cannot figure out if the extent
+ * buffer corresponds to the root of the relocation tree or not. So skip
+ * this check for relocation trees.
+ */
+ if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
struct btrfs_root *check_root;
key.objectid = btrfs_header_owner(leaf);
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
- check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+ check_root = btrfs_get_fs_root(fs_info, &key, false);
/*
* The only reason we also check NULL here is that during
* open_ctree() some roots has not yet been set up.
*/
if (!IS_ERR_OR_NULL(check_root)) {
+ struct extent_buffer *eb;
+
+ eb = btrfs_root_node(check_root);
/* if leaf is the root, then it's fine */
- if (leaf->start !=
- btrfs_root_bytenr(&check_root->root_item)) {
+ if (leaf != eb) {
CORRUPT("non-root leaf's nritems is 0",
- leaf, root, 0);
+ leaf, check_root, 0);
+ free_extent_buffer(eb);
return -EIO;
}
+ free_extent_buffer(eb);
}
return 0;
}
+ if (nritems == 0)
+ return 0;
+
/* Check the 0 item */
if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
- BTRFS_LEAF_DATA_SIZE(root)) {
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("invalid item offset size pair", leaf, root, 0);
return -EIO;
}
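The slot-0 check above encodes the leaf layout invariant: item data is packed from the end of the block toward the front, so the first item's data must end exactly at the leaf data size. As a one-line predicate:

/* True when the first item's data ends flush with the data area. */
static int slot0_ok(unsigned offset0, unsigned size0,
                    unsigned leaf_data_size)
{
        return offset0 + size0 == leaf_data_size;
}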
@@ -624,7 +640,7 @@ static noinline int check_leaf(struct btrfs_root *root,
* all point outside of the leaf.
*/
if (btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(root)) {
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("slot end outside of leaf", leaf, root, slot);
return -EIO;
}
@@ -641,7 +657,7 @@ static int check_node(struct btrfs_root *root, struct extent_buffer *node)
u64 bytenr;
int ret = 0;
- if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
btrfs_crit(root->fs_info,
"corrupt node: block %llu root %llu nritems %lu",
node->start, root->objectid, nr);
@@ -747,7 +763,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
err:
if (reads_done &&
test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(fs_info, eb, eb->start, ret);
+ btree_readahead_hook(fs_info, eb, ret);
if (ret) {
/*
@@ -772,7 +788,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
eb->read_mirror = failed_mirror;
atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
+ btree_readahead_hook(eb->fs_info, eb, -EIO);
return -EIO; /* we fixed nothing */
}
@@ -981,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -1004,6 +1020,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int async = check_async_write(inode, bio_flags);
int ret;
@@ -1012,23 +1029,22 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
*/
- ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
- bio, BTRFS_WQ_ENDIO_METADATA);
+ ret = btrfs_bio_wq_end_io(fs_info, bio,
+ BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else {
/*
* kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs
*/
- ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, bio, mirror_num, 0,
+ ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
bio_offset,
__btree_submit_bio_start,
__btree_submit_bio_done);
@@ -1146,12 +1162,12 @@ static const struct address_space_operations btree_aops = {
.set_page_dirty = btree_set_page_dirty,
};
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
+ struct inode *btree_inode = fs_info->btree_inode;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@@ -1159,15 +1175,15 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
free_extent_buffer(buf);
}
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
+ struct inode *btree_inode = fs_info->btree_inode;
struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return 0;
@@ -1191,19 +1207,13 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
return 0;
}
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
- u64 bytenr)
-{
- return find_extent_buffer(fs_info, bytenr);
-}
-
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
- u64 bytenr)
+struct extent_buffer *btrfs_find_create_tree_block(
+ struct btrfs_fs_info *fs_info,
+ u64 bytenr)
{
- if (btrfs_is_testing(root->fs_info))
- return alloc_test_extent_buffer(root->fs_info, bytenr,
- root->nodesize);
- return alloc_extent_buffer(root->fs_info, bytenr);
+ if (btrfs_is_testing(fs_info))
+ return alloc_test_extent_buffer(fs_info, bytenr);
+ return alloc_extent_buffer(fs_info, bytenr);
}
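With the btrfs_find_tree_block() wrapper deleted, callers switch to the underlying extent_io helper directly, as the btrfs_destroy_marked_extents() hunk further down shows. Caller-side, the change is roughly:

	/* before: eb = btrfs_find_tree_block(root->fs_info, start); */
	eb = find_extent_buffer(fs_info, start);
	if (!eb)
		continue;	/* nothing cached at this bytenr */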
@@ -1219,17 +1229,17 @@ int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
buf->start, buf->start + buf->len - 1);
}
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 parent_transid)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return buf;
- ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
+ ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
if (ret) {
free_extent_buffer(buf);
return ERR_PTR(ret);
@@ -1283,16 +1293,12 @@ btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
kfree(writers);
}
-static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
- struct btrfs_root *root, struct btrfs_fs_info *fs_info,
+static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
u64 objectid)
{
bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
root->node = NULL;
root->commit_root = NULL;
- root->sectorsize = sectorsize;
- root->nodesize = nodesize;
- root->stripesize = stripesize;
root->state = 0;
root->orphan_cleanup_state = 0;
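Dropping the three size parameters shortens every __setup_root() call in the file; taken from the hunks below, the before/after shape is:

	/* before: sizes threaded through from the tree root */
	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, root, fs_info, objectid);

	/* after: anything that needs a size reads it from fs_info */
	__setup_root(root, fs_info, objectid);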
@@ -1370,8 +1376,7 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
- u32 sectorsize, u32 nodesize)
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
@@ -1381,9 +1386,9 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
root = btrfs_alloc_root(fs_info, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
+
/* We don't use the stripesize in selftest, so set it to the sectorsize */
- __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
- BTRFS_ROOT_TREE_OBJECTID);
+ __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
root->alloc_bytenr = 0;
return root;
@@ -1405,8 +1410,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, root, fs_info, objectid);
+ __setup_root(root, fs_info, objectid);
root->root_key.objectid = objectid;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
root->root_key.offset = 0;
@@ -1418,18 +1422,15 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
goto fail;
}
- memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, objectid);
root->node = leaf;
- write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
- BTRFS_FSID_SIZE);
- write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
- btrfs_header_chunk_tree_uuid(leaf),
- BTRFS_UUID_SIZE);
+ write_extent_buffer_fsid(leaf, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
root->commit_root = btrfs_root_node(root);
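write_extent_buffer_fsid() and write_extent_buffer_chunk_tree_uuid() are new convenience wrappers; judging only from the call sites they replace, their likely shape is:

	/* reconstructed from the removed call sites, not quoted from extent_io.c */
	void write_extent_buffer_fsid(struct extent_buffer *eb, const void *fsid)
	{
		write_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	}

Likewise memzero_extent_buffer(eb, start, len) stands in for the old memset_extent_buffer(eb, 0, start, len), dropping the always-zero value argument.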
@@ -1474,16 +1475,13 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct btrfs_root *tree_root = fs_info->tree_root;
struct extent_buffer *leaf;
root = btrfs_alloc_root(fs_info, GFP_NOFS);
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, root, fs_info,
- BTRFS_TREE_LOG_OBJECTID);
+ __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1505,15 +1503,14 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
return ERR_CAST(leaf);
}
- memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
root->node = leaf;
- write_extent_buffer(root->node, root->fs_info->fsid,
- btrfs_header_fsid(), BTRFS_FSID_SIZE);
+ write_extent_buffer_fsid(root->node, fs_info->fsid);
btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node);
return root;
@@ -1535,10 +1532,11 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log_root;
struct btrfs_inode_item *inode_item;
- log_root = alloc_log_tree(trans, root->fs_info);
+ log_root = alloc_log_tree(trans, fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
@@ -1549,7 +1547,8 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_generation(inode_item, 1);
btrfs_set_stack_inode_size(inode_item, 3);
btrfs_set_stack_inode_nlink(inode_item, 1);
- btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
+ btrfs_set_stack_inode_nbytes(inode_item,
+ fs_info->nodesize);
btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
btrfs_set_root_node(&log_root->root_item, log_root->node);
@@ -1581,8 +1580,7 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
goto alloc_fail;
}
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, root, fs_info, key->objectid);
+ __setup_root(root, fs_info, key->objectid);
ret = btrfs_find_root(tree_root, key, path,
&root->root_item, &root->root_key);
@@ -1593,7 +1591,8 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
}
generation = btrfs_root_generation(&root->root_item);
- root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+ root->node = read_tree_block(fs_info,
+ btrfs_root_bytenr(&root->root_item),
generation);
if (IS_ERR(root->node)) {
ret = PTR_ERR(root->node);
@@ -1848,6 +1847,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
static int cleaner_kthread(void *arg)
{
struct btrfs_root *root = arg;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int again;
struct btrfs_trans_handle *trans;
@@ -1855,40 +1855,40 @@ static int cleaner_kthread(void *arg)
again = 0;
/* Make the cleaner go to sleep early. */
- if (btrfs_need_cleaner_sleep(root))
+ if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
/*
* Do not do anything if we might cause open_ctree() to block
* before we have finished mounting the filesystem.
*/
- if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
+ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
goto sleep;
- if (!mutex_trylock(&root->fs_info->cleaner_mutex))
+ if (!mutex_trylock(&fs_info->cleaner_mutex))
goto sleep;
/*
* Avoid the case where the fs status changes between the
* check above and the trylock.
*/
- if (btrfs_need_cleaner_sleep(root)) {
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ if (btrfs_need_cleaner_sleep(fs_info)) {
+ mutex_unlock(&fs_info->cleaner_mutex);
goto sleep;
}
- mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
- btrfs_run_delayed_iputs(root);
- mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+ mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
again = btrfs_clean_one_deleted_snapshot(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ mutex_unlock(&fs_info->cleaner_mutex);
/*
* The defragger has dealt with the R/O remount and umount,
* so we needn't do anything special here.
*/
- btrfs_run_defrag_inodes(root->fs_info);
+ btrfs_run_defrag_inodes(fs_info);
/*
* Acquires fs_info->delete_unused_bgs_mutex to avoid racing
@@ -1898,7 +1898,7 @@ static int cleaner_kthread(void *arg)
* can't hold, nor need to, fs_info->cleaner_mutex when deleting
* unused block groups.
*/
- btrfs_delete_unused_bgs(root->fs_info);
+ btrfs_delete_unused_bgs(fs_info);
sleep:
if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -1922,15 +1922,15 @@ sleep:
trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"cleaner transaction attach returned %ld",
PTR_ERR(trans));
} else {
int ret;
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"cleaner open transaction commit returned %d",
ret);
}
@@ -1941,6 +1941,7 @@ sleep:
static int transaction_kthread(void *arg)
{
struct btrfs_root *root = arg;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_transaction *cur;
u64 transid;
@@ -1950,26 +1951,26 @@ static int transaction_kthread(void *arg)
do {
cannot_commit = false;
- delay = HZ * root->fs_info->commit_interval;
- mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ delay = HZ * fs_info->commit_interval;
+ mutex_lock(&fs_info->transaction_kthread_mutex);
- spin_lock(&root->fs_info->trans_lock);
- cur = root->fs_info->running_transaction;
+ spin_lock(&fs_info->trans_lock);
+ cur = fs_info->running_transaction;
if (!cur) {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
goto sleep;
}
now = get_seconds();
if (cur->state < TRANS_STATE_BLOCKED &&
(now < cur->start_time ||
- now - cur->start_time < root->fs_info->commit_interval)) {
- spin_unlock(&root->fs_info->trans_lock);
+ now - cur->start_time < fs_info->commit_interval)) {
+ spin_unlock(&fs_info->trans_lock);
delay = HZ * 5;
goto sleep;
}
transid = cur->transid;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
/* If the file system is aborted, this will always fail. */
trans = btrfs_attach_transaction(root);
@@ -1979,20 +1980,20 @@ static int transaction_kthread(void *arg)
goto sleep;
}
if (transid == trans->transid) {
- btrfs_commit_transaction(trans, root);
+ btrfs_commit_transaction(trans);
} else {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
}
sleep:
- wake_up_process(root->fs_info->cleaner_kthread);
- mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+ wake_up_process(fs_info->cleaner_kthread);
+ mutex_unlock(&fs_info->transaction_kthread_mutex);
if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
- &root->fs_info->fs_state)))
- btrfs_cleanup_transaction(root);
+ &fs_info->fs_state)))
+ btrfs_cleanup_transaction(fs_info);
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
- (!btrfs_transaction_blocked(root->fs_info) ||
+ (!btrfs_transaction_blocked(fs_info) ||
cannot_commit))
schedule_timeout(delay);
__set_current_state(TASK_RUNNING);
@@ -2279,8 +2280,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_free_log_root_tree(NULL, fs_info);
- btrfs_destroy_pinned_extent(fs_info->tree_root,
- fs_info->pinned_extents);
+ btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
}
}
@@ -2306,33 +2306,31 @@ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
init_waitqueue_head(&fs_info->balance_wait_q);
}
-static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_root *tree_root)
+static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
- fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
- set_nlink(fs_info->btree_inode, 1);
+ struct inode *inode = fs_info->btree_inode;
+
+ inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ set_nlink(inode, 1);
/*
* we set the i_size on the btree inode to the max possible offset.
* the real end of the address space is determined by all of
* the devices in the system
*/
- fs_info->btree_inode->i_size = OFFSET_MAX;
- fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+ inode->i_size = OFFSET_MAX;
+ inode->i_mapping->a_ops = &btree_aops;
- RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
- extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping);
- BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+ RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
+ extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+ BTRFS_I(inode)->io_tree.track_uptodate = 0;
+ extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
- BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+ BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
- BTRFS_I(fs_info->btree_inode)->root = tree_root;
- memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
- sizeof(struct btrfs_key));
- set_bit(BTRFS_INODE_DUMMY,
- &BTRFS_I(fs_info->btree_inode)->runtime_flags);
- btrfs_insert_inode_hash(fs_info->btree_inode);
+ BTRFS_I(inode)->root = fs_info->tree_root;
+ memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
+ set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+ btrfs_insert_inode_hash(inode);
}
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
@@ -2453,7 +2451,6 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices)
{
int ret;
- struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *log_tree_root;
struct btrfs_super_block *disk_super = fs_info->super_copy;
u64 bytenr = btrfs_super_log_root(disk_super);
@@ -2467,12 +2464,10 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
if (!log_tree_root)
return -ENOMEM;
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, log_tree_root, fs_info,
- BTRFS_TREE_LOG_OBJECTID);
+ __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
- log_tree_root->node = read_tree_block(tree_root, bytenr,
- fs_info->generation + 1);
+ log_tree_root->node = read_tree_block(fs_info, bytenr,
+ fs_info->generation + 1);
if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
@@ -2487,15 +2482,15 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
if (ret) {
- btrfs_handle_fs_error(tree_root->fs_info, ret,
- "Failed to recover log tree");
+ btrfs_handle_fs_error(fs_info, ret,
+ "Failed to recover log tree");
free_extent_buffer(log_tree_root->node);
kfree(log_tree_root);
return ret;
}
if (fs_info->sb->s_flags & MS_RDONLY) {
- ret = btrfs_commit_super(tree_root);
+ ret = btrfs_commit_super(fs_info);
if (ret)
return ret;
}
@@ -2503,13 +2498,15 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
return 0;
}
-static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
- struct btrfs_root *tree_root)
+static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *root;
struct btrfs_key location;
int ret;
+ BUG_ON(!fs_info->tree_root);
+
location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
location.type = BTRFS_ROOT_ITEM_KEY;
location.offset = 0;
@@ -2720,7 +2717,7 @@ int open_ctree(struct super_block *sb,
sb->s_blocksize_bits = blksize_bits(4096);
sb->s_bdi = &fs_info->bdi;
- btrfs_init_btree_inode(fs_info, tree_root);
+ btrfs_init_btree_inode(fs_info);
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
@@ -2758,14 +2755,18 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->pinned_chunks);
+ /* Usable values until the real ones are cached from the superblock */
+ fs_info->nodesize = 4096;
+ fs_info->sectorsize = 4096;
+ fs_info->stripesize = 4096;
+
ret = btrfs_alloc_stripe_hash_table(fs_info);
if (ret) {
err = ret;
goto fail_alloc;
}
- __setup_root(4096, 4096, 4096, tree_root,
- fs_info, BTRFS_ROOT_TREE_OBJECTID);
+ __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
invalidate_bdev(fs_devices->latest_bdev);
@@ -2829,7 +2830,7 @@ int open_ctree(struct super_block *sb,
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
- ret = btrfs_parse_options(tree_root, options, sb->s_flags);
+ ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) {
err = ret;
goto fail_alloc;
@@ -2847,7 +2848,7 @@ int open_ctree(struct super_block *sb,
features = btrfs_super_incompat_flags(disk_super);
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
- if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+ if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
@@ -2870,6 +2871,11 @@ int open_ctree(struct super_block *sb,
fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+ /* Cache block sizes */
+ fs_info->nodesize = nodesize;
+ fs_info->sectorsize = sectorsize;
+ fs_info->stripesize = stripesize;
+
/*
* mixed block groups end up with duplicate but slightly offset
* extent buffers for the same range. It leads to corruptions
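The sizes are set twice on purpose: the 4096 placeholders keep the early superblock and system-chunk reads sane before the real geometry is known, and the values cached in the second hunk overwrite them once disk_super has been validated. The nodesize/sectorsize locals come from the superblock accessors a few lines earlier in open_ctree(), not visible in this hunk:

	/* parsed from the on-disk super block once it has been read */
	nodesize = btrfs_super_nodesize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);

stripesize is derived near the same spot; the exact expression is outside the hunk.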
@@ -2910,15 +2916,11 @@ int open_ctree(struct super_block *sb,
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
SZ_4M / PAGE_SIZE);
- tree_root->nodesize = nodesize;
- tree_root->sectorsize = sectorsize;
- tree_root->stripesize = stripesize;
-
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
mutex_lock(&fs_info->chunk_mutex);
- ret = btrfs_read_sys_array(tree_root);
+ ret = btrfs_read_sys_array(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
if (ret) {
btrfs_err(fs_info, "failed to read the system array: %d", ret);
@@ -2927,10 +2929,9 @@ int open_ctree(struct super_block *sb,
generation = btrfs_super_chunk_root_generation(disk_super);
- __setup_root(nodesize, sectorsize, stripesize, chunk_root,
- fs_info, BTRFS_CHUNK_TREE_OBJECTID);
+ __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
- chunk_root->node = read_tree_block(chunk_root,
+ chunk_root->node = read_tree_block(fs_info,
btrfs_super_chunk_root(disk_super),
generation);
if (IS_ERR(chunk_root->node) ||
@@ -2947,7 +2948,7 @@ int open_ctree(struct super_block *sb,
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
- ret = btrfs_read_chunk_tree(chunk_root);
+ ret = btrfs_read_chunk_tree(fs_info);
if (ret) {
btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
goto fail_tree_roots;
@@ -2967,7 +2968,7 @@ int open_ctree(struct super_block *sb,
retry_root_backup:
generation = btrfs_super_generation(disk_super);
- tree_root->node = read_tree_block(tree_root,
+ tree_root->node = read_tree_block(fs_info,
btrfs_super_root(disk_super),
generation);
if (IS_ERR(tree_root->node) ||
@@ -2995,7 +2996,7 @@ retry_root_backup:
mutex_unlock(&tree_root->objectid_mutex);
- ret = btrfs_read_roots(fs_info, tree_root);
+ ret = btrfs_read_roots(fs_info);
if (ret)
goto recovery_tree_root;
@@ -3048,7 +3049,7 @@ retry_root_backup:
goto fail_sysfs;
}
- ret = btrfs_read_block_groups(fs_info->extent_root);
+ ret = btrfs_read_block_groups(fs_info);
if (ret) {
btrfs_err(fs_info, "failed to read block groups: %d", ret);
goto fail_sysfs;
@@ -3076,8 +3077,8 @@ retry_root_backup:
if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
- if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
- !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
+ if (!btrfs_test_opt(fs_info, SSD) &&
+ !btrfs_test_opt(fs_info, NOSSD) &&
!fs_info->fs_devices->rotating) {
btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
btrfs_set_opt(fs_info->mount_opt, SSD);
@@ -3090,9 +3091,9 @@ retry_root_backup:
btrfs_apply_pending_changes(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
- ret = btrfsic_mount(tree_root, fs_devices,
- btrfs_test_opt(tree_root->fs_info,
+ if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
+ ret = btrfsic_mount(fs_info, fs_devices,
+ btrfs_test_opt(fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0,
fs_info->check_integrity_print_mask);
@@ -3108,7 +3109,7 @@ retry_root_backup:
/* do not make disk changes in a broken FS or when nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
- !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
err = ret;
@@ -3116,7 +3117,7 @@ retry_root_backup:
}
}
- ret = btrfs_find_orphan_roots(tree_root);
+ ret = btrfs_find_orphan_roots(fs_info);
if (ret)
goto fail_qgroup;
@@ -3164,19 +3165,19 @@ retry_root_backup:
if (ret) {
btrfs_warn(fs_info,
"failed to clear free space tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
}
- if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "creating free space tree");
ret = btrfs_create_free_space_tree(fs_info);
if (ret) {
btrfs_warn(fs_info,
"failed to create free space tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
}
@@ -3185,7 +3186,7 @@ retry_root_backup:
if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
(ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
up_read(&fs_info->cleanup_work_sem);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
up_read(&fs_info->cleanup_work_sem);
@@ -3193,14 +3194,14 @@ retry_root_backup:
ret = btrfs_resume_balance_async(fs_info);
if (ret) {
btrfs_warn(fs_info, "failed to resume balance: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
ret = btrfs_resume_dev_replace_async(fs_info);
if (ret) {
btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
@@ -3212,10 +3213,10 @@ retry_root_backup:
if (ret) {
btrfs_warn(fs_info,
"failed to create the UUID tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
- } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
+ } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
fs_info->generation !=
btrfs_super_uuid_tree_generation(disk_super)) {
btrfs_info(fs_info, "checking UUID tree");
@@ -3223,7 +3224,7 @@ retry_root_backup:
if (ret) {
btrfs_warn(fs_info,
"failed to check the UUID tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
} else {
@@ -3243,7 +3244,7 @@ fail_qgroup:
btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
- btrfs_cleanup_transaction(fs_info->tree_root);
+ btrfs_cleanup_transaction(fs_info);
btrfs_free_fs_roots(fs_info);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
@@ -3291,7 +3292,7 @@ fail:
return err;
recovery_tree_root:
- if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
+ if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
goto fail_tree_roots;
free_root_pointers(fs_info, 0);
@@ -3317,7 +3318,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
struct btrfs_device *device = (struct btrfs_device *)
bh->b_private;
- btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
+ btrfs_warn_rl_in_rcu(device->fs_info,
"lost page write due to IO error on %s",
rcu_str_deref(device->name));
/* note, we don't set_buffer_write_io_error because we have
@@ -3462,7 +3463,7 @@ static int write_dev_supers(struct btrfs_device *device,
bh = __getblk(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
if (!bh) {
- btrfs_err(device->dev_root->fs_info,
+ btrfs_err(device->fs_info,
"couldn't get super buffer head for bytenr %llu",
bytenr);
errors++;
@@ -3695,7 +3696,7 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
return num_tolerated_disk_barrier_failures;
}
-static int write_all_supers(struct btrfs_root *root, int max_mirrors)
+static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
struct list_head *head;
struct btrfs_device *dev;
@@ -3707,23 +3708,23 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
int total_errors = 0;
u64 flags;
- do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
- backup_super_roots(root->fs_info);
+ do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
+ backup_super_roots(fs_info);
- sb = root->fs_info->super_for_commit;
+ sb = fs_info->super_for_commit;
dev_item = &sb->dev_item;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- head = &root->fs_info->fs_devices->devices;
- max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ head = &fs_info->fs_devices->devices;
+ max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
if (do_barriers) {
- ret = barrier_all_devices(root->fs_info);
+ ret = barrier_all_devices(fs_info);
if (ret) {
mutex_unlock(
- &root->fs_info->fs_devices->device_list_mutex);
- btrfs_handle_fs_error(root->fs_info, ret,
- "errors while submitting device barriers.");
+ &fs_info->fs_devices->device_list_mutex);
+ btrfs_handle_fs_error(fs_info, ret,
+ "errors while submitting device barriers.");
return ret;
}
}
@@ -3757,13 +3758,14 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
total_errors++;
}
if (total_errors > max_errors) {
- btrfs_err(root->fs_info, "%d errors while writing supers",
- total_errors);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ btrfs_err(fs_info, "%d errors while writing supers",
+ total_errors);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
/* FUA is masked off if unsupported and can't be the reason */
- btrfs_handle_fs_error(root->fs_info, -EIO,
- "%d errors while writing supers", total_errors);
+ btrfs_handle_fs_error(fs_info, -EIO,
+ "%d errors while writing supers",
+ total_errors);
return -EIO;
}
@@ -3778,19 +3780,20 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
if (ret)
total_errors++;
}
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
- btrfs_handle_fs_error(root->fs_info, -EIO,
- "%d errors while writing supers", total_errors);
+ btrfs_handle_fs_error(fs_info, -EIO,
+ "%d errors while writing supers",
+ total_errors);
return -EIO;
}
return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int max_mirrors)
+ struct btrfs_fs_info *fs_info, int max_mirrors)
{
- return write_all_supers(root, max_mirrors);
+ return write_all_supers(fs_info, max_mirrors);
}
/* Drop a fs root from the radix tree and free it. */
@@ -3826,7 +3829,7 @@ static void free_fs_root(struct btrfs_root *root)
{
iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
- btrfs_free_block_rsv(root, root->orphan_block_rsv);
+ btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
@@ -3896,28 +3899,29 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
return err;
}
-int btrfs_commit_super(struct btrfs_root *root)
+int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
- mutex_lock(&root->fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
- wake_up_process(root->fs_info->cleaner_kthread);
+ mutex_lock(&fs_info->cleaner_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_mutex);
+ wake_up_process(fs_info->cleaner_kthread);
/* wait until ongoing cleanup work is done */
- down_write(&root->fs_info->cleanup_work_sem);
- up_write(&root->fs_info->cleanup_work_sem);
+ down_write(&fs_info->cleanup_work_sem);
+ up_write(&fs_info->cleanup_work_sem);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- return btrfs_commit_transaction(trans, root);
+ return btrfs_commit_transaction(trans);
}
-void close_ctree(struct btrfs_root *root)
+void close_ctree(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
int ret;
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
@@ -3952,15 +3956,15 @@ void close_ctree(struct btrfs_root *root)
* block groups queued for removal, the deletion will be
* skipped when we quit the cleaner thread.
*/
- btrfs_delete_unused_bgs(root->fs_info);
+ btrfs_delete_unused_bgs(fs_info);
- ret = btrfs_commit_super(root);
+ ret = btrfs_commit_super(fs_info);
if (ret)
btrfs_err(fs_info, "commit super ret %d", ret);
}
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
- btrfs_error_commit_super(root);
+ btrfs_error_commit_super(fs_info);
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
@@ -3996,8 +4000,8 @@ void close_ctree(struct btrfs_root *root)
iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
- btrfsic_unmount(root, fs_info->fs_devices);
+ if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
+ btrfsic_unmount(fs_info->fs_devices);
#endif
btrfs_close_devices(fs_info->fs_devices);
@@ -4014,7 +4018,7 @@ void close_ctree(struct btrfs_root *root)
__btrfs_free_block_rsv(root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
while (!list_empty(&fs_info->pinned_chunks)) {
struct extent_map *em;
@@ -4023,7 +4027,7 @@ void close_ctree(struct btrfs_root *root)
list_del_init(&em->list);
free_extent_map(em);
}
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
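lock_chunks()/unlock_chunks() were one-line wrappers around the chunk mutex, which is why the series can simply opencode them; reconstructed from the call sites they replace, they amounted to:

	/* the removed helpers, reconstructed, not quoted from the headers */
	static inline void lock_chunks(struct btrfs_root *root)
	{
		mutex_lock(&root->fs_info->chunk_mutex);
	}

	static inline void unlock_chunks(struct btrfs_root *root)
	{
		mutex_unlock(&root->fs_info->chunk_mutex);
	}

Opencoding them removes the last reason for these paths to carry a btrfs_root at all.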
@@ -4045,6 +4049,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
u64 transid = btrfs_header_generation(buf);
int was_dirty;
@@ -4059,24 +4064,25 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
return;
#endif
root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+ fs_info = root->fs_info;
btrfs_assert_tree_locked(buf);
- if (transid != root->fs_info->generation)
+ if (transid != fs_info->generation)
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
- buf->start, transid, root->fs_info->generation);
+ buf->start, transid, fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty)
- __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+ __percpu_counter_add(&fs_info->dirty_metadata_bytes,
buf->len,
- root->fs_info->dirty_metadata_batch);
+ fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
- btrfs_print_leaf(root, buf);
+ btrfs_print_leaf(fs_info, buf);
ASSERT(0);
}
#endif
}
-static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
+static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
int flush_delayed)
{
/*
@@ -4089,30 +4095,31 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
return;
if (flush_delayed)
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
- ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+ ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
BTRFS_DIRTY_METADATA_THRESH);
if (ret > 0) {
- balance_dirty_pages_ratelimited(
- root->fs_info->btree_inode->i_mapping);
+ balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}
}
-void btrfs_btree_balance_dirty(struct btrfs_root *root)
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
- __btrfs_btree_balance_dirty(root, 1);
+ __btrfs_btree_balance_dirty(fs_info, 1);
}
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
- __btrfs_btree_balance_dirty(root, 0);
+ __btrfs_btree_balance_dirty(fs_info, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
- return btree_read_extent_buffer_pages(root, buf, parent_transid);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -4263,17 +4270,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
return ret;
}
-static void btrfs_error_commit_super(struct btrfs_root *root)
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
- mutex_lock(&root->fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ mutex_lock(&fs_info->cleaner_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_mutex);
- down_write(&root->fs_info->cleanup_work_sem);
- up_write(&root->fs_info->cleanup_work_sem);
+ down_write(&fs_info->cleanup_work_sem);
+ up_write(&fs_info->cleanup_work_sem);
/* cleanup FS via transaction */
- btrfs_cleanup_transaction(root);
+ btrfs_cleanup_transaction(fs_info);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
@@ -4316,7 +4323,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -4328,7 +4335,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
spin_lock(&delayed_refs->lock);
if (atomic_read(&delayed_refs->num_entries) == 0) {
spin_unlock(&delayed_refs->lock);
- btrfs_info(root->fs_info, "delayed_refs has NO entry");
+ btrfs_info(fs_info, "delayed_refs has NO entry");
return ret;
}
@@ -4354,6 +4361,8 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
list) {
ref->in_tree = 0;
list_del(&ref->list);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
btrfs_put_delayed_ref(ref);
}
@@ -4371,7 +4380,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
mutex_unlock(&head->mutex);
if (pin_bytes)
- btrfs_pin_extent(root, head->node.bytenr,
+ btrfs_pin_extent(fs_info, head->node.bytenr,
head->node.num_bytes, 1);
btrfs_put_delayed_ref(&head->node);
cond_resched();
@@ -4435,7 +4444,7 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->delalloc_root_lock);
}
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark)
{
@@ -4452,8 +4461,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
clear_extent_bits(dirty_pages, start, end, mark);
while (start <= end) {
- eb = btrfs_find_tree_block(root->fs_info, start);
- start += root->nodesize;
+ eb = find_extent_buffer(fs_info, start);
+ start += fs_info->nodesize;
if (!eb)
continue;
wait_on_extent_buffer_writeback(eb);
@@ -4468,7 +4477,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
return ret;
}
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents)
{
struct extent_io_tree *unpin;
@@ -4486,15 +4495,15 @@ again:
break;
clear_extent_dirty(unpin, start, end);
- btrfs_error_unpin_extent_range(root, start, end);
+ btrfs_error_unpin_extent_range(fs_info, start, end);
cond_resched();
}
if (loop) {
- if (unpin == &root->fs_info->freed_extents[0])
- unpin = &root->fs_info->freed_extents[1];
+ if (unpin == &fs_info->freed_extents[0])
+ unpin = &fs_info->freed_extents[1];
else
- unpin = &root->fs_info->freed_extents[0];
+ unpin = &fs_info->freed_extents[0];
loop = false;
goto again;
}
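The two-pass loop above exists because fs_info keeps a pair of freed-extent trees and points pinned_extents at one of them per transaction, so teardown has to drain both. For reference, the commit path flips the live tree with essentially the same test (a sketch from memory of extent-tree.c, not part of this diff):

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];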
@@ -4517,7 +4526,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
}
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *cache;
@@ -4527,8 +4536,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_block_group_cache,
dirty_list);
if (!cache) {
- btrfs_err(root->fs_info,
- "orphan block group dirty_bgs list");
+ btrfs_err(fs_info, "orphan block group dirty_bgs list");
spin_unlock(&cur_trans->dirty_bgs_lock);
return;
}
@@ -4556,8 +4564,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_block_group_cache,
io_list);
if (!cache) {
- btrfs_err(root->fs_info,
- "orphan block group on io_bgs list");
+ btrfs_err(fs_info, "orphan block group on io_bgs list");
return;
}
@@ -4570,27 +4577,27 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- btrfs_cleanup_dirty_bgs(cur_trans, root);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
- btrfs_destroy_delayed_refs(cur_trans, root);
+ btrfs_destroy_delayed_refs(cur_trans, fs_info);
cur_trans->state = TRANS_STATE_COMMIT_START;
- wake_up(&root->fs_info->transaction_blocked_wait);
+ wake_up(&fs_info->transaction_blocked_wait);
cur_trans->state = TRANS_STATE_UNBLOCKED;
- wake_up(&root->fs_info->transaction_wait);
+ wake_up(&fs_info->transaction_wait);
- btrfs_destroy_delayed_inodes(root);
- btrfs_assert_delayed_root_empty(root);
+ btrfs_destroy_delayed_inodes(fs_info);
+ btrfs_assert_delayed_root_empty(fs_info);
- btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+ btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
- btrfs_destroy_pinned_extent(root,
- root->fs_info->pinned_extents);
+ btrfs_destroy_pinned_extent(fs_info,
+ fs_info->pinned_extents);
cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
@@ -4601,27 +4608,27 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
*/
}
-static int btrfs_cleanup_transaction(struct btrfs_root *root)
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
struct btrfs_transaction *t;
- mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ mutex_lock(&fs_info->transaction_kthread_mutex);
- spin_lock(&root->fs_info->trans_lock);
- while (!list_empty(&root->fs_info->trans_list)) {
- t = list_first_entry(&root->fs_info->trans_list,
+ spin_lock(&fs_info->trans_lock);
+ while (!list_empty(&fs_info->trans_list)) {
+ t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
if (t->state >= TRANS_STATE_COMMIT_START) {
atomic_inc(&t->use_count);
- spin_unlock(&root->fs_info->trans_lock);
- btrfs_wait_for_commit(root, t->transid);
+ spin_unlock(&fs_info->trans_lock);
+ btrfs_wait_for_commit(fs_info, t->transid);
btrfs_put_transaction(t);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
continue;
}
- if (t == root->fs_info->running_transaction) {
+ if (t == fs_info->running_transaction) {
t->state = TRANS_STATE_COMMIT_DOING;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
/*
* We wait for 0 num_writers since we don't hold a trans
* handle open currently for this transaction.
@@ -4629,27 +4636,27 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
wait_event(t->writer_wait,
atomic_read(&t->num_writers) == 0);
} else {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
}
- btrfs_cleanup_one_transaction(t, root);
+ btrfs_cleanup_one_transaction(t, fs_info);
- spin_lock(&root->fs_info->trans_lock);
- if (t == root->fs_info->running_transaction)
- root->fs_info->running_transaction = NULL;
+ spin_lock(&fs_info->trans_lock);
+ if (t == fs_info->running_transaction)
+ fs_info->running_transaction = NULL;
list_del_init(&t->list);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(t);
- trace_btrfs_transaction_commit(root);
- spin_lock(&root->fs_info->trans_lock);
- }
- spin_unlock(&root->fs_info->trans_lock);
- btrfs_destroy_all_ordered_extents(root->fs_info);
- btrfs_destroy_delayed_inodes(root);
- btrfs_assert_delayed_root_empty(root);
- btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
- btrfs_destroy_all_delalloc_inodes(root->fs_info);
- mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+ trace_btrfs_transaction_commit(fs_info->tree_root);
+ spin_lock(&fs_info->trans_lock);
+ }
+ spin_unlock(&fs_info->trans_lock);
+ btrfs_destroy_all_ordered_extents(fs_info);
+ btrfs_destroy_delayed_inodes(fs_info);
+ btrfs_assert_delayed_root_empty(fs_info);
+ btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
+ btrfs_destroy_all_delalloc_inodes(fs_info);
+ mutex_unlock(&fs_info->transaction_kthread_mutex);
return 0;
}
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 1a3237e5700f..44dcd9af6b7c 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -44,27 +44,26 @@ static inline u64 btrfs_sb_offset(int mirror)
struct btrfs_device;
struct btrfs_fs_devices;
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
- u64 parent_transid);
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 parent_transid);
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb);
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
- u64 bytenr);
+struct extent_buffer *btrfs_find_create_tree_block(
+ struct btrfs_fs_info *fs_info,
+ u64 bytenr);
void clean_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
-void close_ctree(struct btrfs_root *root);
+void close_ctree(struct btrfs_fs_info *fs_info);
int write_ctree_super(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int max_mirrors);
+ struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
struct buffer_head **bh_ret);
-int btrfs_commit_super(struct btrfs_root *root);
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
- u64 bytenr);
+int btrfs_commit_super(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
@@ -85,15 +84,14 @@ btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-void btrfs_btree_balance_dirty(struct btrfs_root *root);
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_root(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
- u32 sectorsize, u32 nodesize);
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
#endif
/*
@@ -121,7 +119,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(char *data, u32 seed, size_t len);
-void btrfs_csum_final(u32 crc, char *result);
+void btrfs_csum_final(u32 crc, u8 *result);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
@@ -137,9 +135,9 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 objectid);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 2513a7f53334..340d90751263 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -153,6 +153,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
static struct dentry *btrfs_get_parent(struct dentry *child)
{
struct inode *dir = d_inode(child);
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -169,7 +170,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
- root = root->fs_info->tree_root;
+ root = fs_info->tree_root;
} else {
key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
@@ -205,13 +206,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
btrfs_free_path(path);
if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
- return btrfs_get_dentry(root->fs_info->sb, key.objectid,
+ return btrfs_get_dentry(fs_info->sb, key.objectid,
found_key.offset, 0, 0);
}
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
+ return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
@@ -222,6 +223,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
{
struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_inode_ref *iref;
@@ -250,7 +252,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
- root = root->fs_info->tree_root;
+ root = fs_info->tree_root;
} else {
key.objectid = ino;
key.offset = btrfs_ino(dir);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4607af38c72e..e97302f437a1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -61,10 +61,10 @@ enum {
};
static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
+ struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
@@ -73,17 +73,17 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 flags,
+ struct btrfs_fs_info *fs_info, u64 flags,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
@@ -96,8 +96,6 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
-int btrfs_pin_extent(struct btrfs_root *root,
- u64 bytenr, u64 num_bytes, int reserved);
static int __reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_space_info *space_info,
u64 orig_bytes,
@@ -223,18 +221,18 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
return ret;
}
-static int add_excluded_extent(struct btrfs_root *root,
+static int add_excluded_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes)
{
u64 end = start + num_bytes - 1;
- set_extent_bits(&root->fs_info->freed_extents[0],
+ set_extent_bits(&fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE);
- set_extent_bits(&root->fs_info->freed_extents[1],
+ set_extent_bits(&fs_info->freed_extents[1],
start, end, EXTENT_UPTODATE);
return 0;
}
-static void free_excluded_extents(struct btrfs_root *root,
+static void free_excluded_extents(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
u64 start, end;
@@ -242,13 +240,13 @@ static void free_excluded_extents(struct btrfs_root *root,
start = cache->key.objectid;
end = start + cache->key.offset - 1;
- clear_extent_bits(&root->fs_info->freed_extents[0],
+ clear_extent_bits(&fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE);
- clear_extent_bits(&root->fs_info->freed_extents[1],
+ clear_extent_bits(&fs_info->freed_extents[1],
start, end, EXTENT_UPTODATE);
}
-static int exclude_super_stripes(struct btrfs_root *root,
+static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
u64 bytenr;
@@ -259,7 +257,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
cache->bytes_super += stripe_len;
- ret = add_excluded_extent(root, cache->key.objectid,
+ ret = add_excluded_extent(fs_info, cache->key.objectid,
stripe_len);
if (ret)
return ret;
@@ -267,7 +265,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
+ ret = btrfs_rmap_block(fs_info, cache->key.objectid,
bytenr, 0, &logical, &nr, &stripe_len);
if (ret)
return ret;
@@ -293,7 +291,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
}
cache->bytes_super += len;
- ret = add_excluded_extent(root, start, len);
+ ret = add_excluded_extent(fs_info, start, len);
if (ret) {
kfree(logical);
return ret;
@@ -329,13 +327,13 @@ static void put_caching_control(struct btrfs_caching_control *ctl)
}
#ifdef CONFIG_BTRFS_DEBUG
-static void fragment_free_space(struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
u64 start = block_group->key.objectid;
u64 len = block_group->key.offset;
u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
- root->nodesize : root->sectorsize;
+ fs_info->nodesize : fs_info->sectorsize;
u64 step = chunk << 1;
while (len > chunk) {
@@ -394,9 +392,9 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
- struct btrfs_block_group_cache *block_group;
- struct btrfs_fs_info *fs_info;
- struct btrfs_root *extent_root;
+ struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -406,10 +404,6 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
int ret;
bool wakeup = true;
- block_group = caching_ctl->block_group;
- fs_info = block_group->fs_info;
- extent_root = fs_info->extent_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -422,7 +416,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
* allocate from this block group until we've had a chance to fragment
* the free space.
*/
- if (btrfs_should_fragment_free_space(extent_root, block_group))
+ if (btrfs_should_fragment_free_space(block_group))
wakeup = false;
#endif
/*
@@ -510,7 +504,7 @@ next:
key.objectid);
if (key.type == BTRFS_METADATA_ITEM_KEY)
last = key.objectid +
- fs_info->tree_root->nodesize;
+ fs_info->nodesize;
else
last = key.objectid + key.offset;
@@ -561,7 +555,7 @@ static noinline void caching_thread(struct btrfs_work *work)
spin_unlock(&block_group->lock);
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(extent_root, block_group)) {
+ if (btrfs_should_fragment_free_space(block_group)) {
u64 bytes_used;
spin_lock(&block_group->space_info->lock);
@@ -571,14 +565,14 @@ static noinline void caching_thread(struct btrfs_work *work)
block_group->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&block_group->lock);
spin_unlock(&block_group->space_info->lock);
- fragment_free_space(extent_root, block_group);
+ fragment_free_space(block_group);
}
#endif
caching_ctl->progress = (u64)-1;
up_read(&fs_info->commit_root_sem);
- free_excluded_extents(fs_info->extent_root, block_group);
+ free_excluded_extents(fs_info, block_group);
mutex_unlock(&caching_ctl->mutex);
wake_up(&caching_ctl->wait);
@@ -668,8 +662,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
if (ret == 1 &&
- btrfs_should_fragment_free_space(fs_info->extent_root,
- cache)) {
+ btrfs_should_fragment_free_space(cache)) {
u64 bytes_used;
spin_lock(&cache->space_info->lock);
@@ -679,7 +672,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- fragment_free_space(fs_info->extent_root, cache);
+ fragment_free_space(cache);
}
#endif
mutex_unlock(&caching_ctl->mutex);
@@ -687,7 +680,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
wake_up(&caching_ctl->wait);
if (ret == 1) {
put_caching_control(caching_ctl);
- free_excluded_extents(fs_info->extent_root, cache);
+ free_excluded_extents(fs_info, cache);
return 0;
}
} else {
@@ -778,7 +771,7 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
}
/* simple helper to search for an existing data extent at a given offset */
-int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
int ret;
struct btrfs_key key;
@@ -791,8 +784,7 @@ int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
key.objectid = start;
key.offset = len;
key.type = BTRFS_EXTENT_ITEM_KEY;
- ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
- 0, 0);
+ ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
btrfs_free_path(path);
return ret;
}
@@ -807,7 +799,7 @@ int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
* the delayed refs are not processed.
*/
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
+ struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags)
{
struct btrfs_delayed_ref_head *head;
@@ -825,8 +817,8 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
* If we don't have skinny metadata, don't bother doing anything
* different
*/
- if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
- offset = root->nodesize;
+ if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
+ offset = fs_info->nodesize;
metadata = 0;
}
@@ -847,8 +839,7 @@ search_again:
else
key.type = BTRFS_EXTENT_ITEM_KEY;
- ret = btrfs_search_slot(trans, root->fs_info->extent_root,
- &key, path, 0, 0);
+ ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
if (ret < 0)
goto out_free;
@@ -859,7 +850,7 @@ search_again:
path->slots[0]);
if (key.objectid == bytenr &&
key.type == BTRFS_EXTENT_ITEM_KEY &&
- key.offset == root->nodesize)
+ key.offset == fs_info->nodesize)
ret = 0;
}
}
@@ -1101,7 +1092,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
return ret;
BUG_ON(ret); /* Corruption */
- btrfs_extend_item(root, path, new_size);
+ btrfs_extend_item(root->fs_info, path, new_size);
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1114,7 +1105,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
BTRFS_BLOCK_FLAG_FULL_BACKREF);
bi = (struct btrfs_tree_block_info *)(item + 1);
/* FIXME: get first key of the block */
- memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
+ memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
btrfs_set_tree_block_level(leaf, bi, (int)owner);
} else {
btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
@@ -1540,6 +1531,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int insert)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
@@ -1553,8 +1545,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
int want;
int ret;
int err = 0;
- bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
- SKINNY_METADATA);
+ bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -1748,7 +1739,7 @@ void setup_inline_extent_backref(struct btrfs_root *root,
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- btrfs_extend_item(root, path, size);
+ btrfs_extend_item(root->fs_info, path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
@@ -1875,7 +1866,7 @@ void update_inline_extent_backref(struct btrfs_root *root,
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- btrfs_truncate_item(root, path, item_size, 1);
+ btrfs_truncate_item(root->fs_info, path, item_size, 1);
}
btrfs_mark_buffer_dirty(leaf);
}
@@ -2022,7 +2013,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
return ret;
}
-int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes)
{
int ret;
@@ -2034,10 +2025,10 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
* Avoid races with device replace and make sure our bbio has devices
* associated to its stripes that don't go away while we are discarding.
*/
- btrfs_bio_counter_inc_blocked(root->fs_info);
+ btrfs_bio_counter_inc_blocked(fs_info);
/* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
- bytenr, &num_bytes, &bbio, 0);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
+ &bbio, 0);
/* Error condition is -ENOMEM */
if (!ret) {
struct btrfs_bio_stripe *stripe = bbio->stripes;
@@ -2067,7 +2058,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
}
btrfs_put_bbio(bbio);
}
- btrfs_bio_counter_dec(root->fs_info);
+ btrfs_bio_counter_dec(fs_info);
if (actual_bytes)
*actual_bytes = discarded_bytes;
@@ -2080,12 +2071,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
root_objectid == BTRFS_TREE_LOG_OBJECTID);
@@ -2105,13 +2095,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
struct btrfs_delayed_extent_op *extent_op)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_extent_item *item;
@@ -2154,7 +2143,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path->reada = READA_FORWARD;
path->leave_spinning = 1;
/* now insert the actual backref */
- ret = insert_extent_backref(trans, root->fs_info->extent_root,
+ ret = insert_extent_backref(trans, fs_info->extent_root,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
if (ret)
@@ -2165,7 +2154,7 @@ out:
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2182,7 +2171,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
+ trace_run_delayed_data_ref(fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
@@ -2191,17 +2180,17 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
if (extent_op)
flags |= extent_op->flags_to_set;
- ret = alloc_reserved_file_extent(trans, root,
+ ret = alloc_reserved_file_extent(trans, fs_info,
parent, ref_root, flags,
ref->objectid, ref->offset,
&ins, node->ref_mod);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, root, node, parent,
+ ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, root, node, parent,
+ ret = __btrfs_free_extent(trans, fs_info, node, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
@@ -2230,7 +2219,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
@@ -2246,7 +2235,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
if (trans->aborted)
return 0;
- if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+ if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
metadata = 0;
path = btrfs_alloc_path();
@@ -2266,8 +2255,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
again:
path->reada = READA_FORWARD;
path->leave_spinning = 1;
- ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
- path, 0, 1);
+ ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
if (ret < 0) {
err = ret;
goto out;
@@ -2302,7 +2290,7 @@ again:
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
- ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
+ ret = convert_extent_item_v0(trans, fs_info->extent_root,
path, (u64)-1, 0);
if (ret < 0) {
err = ret;
@@ -2323,7 +2311,7 @@ out:
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2333,11 +2321,10 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_key ins;
u64 parent = 0;
u64 ref_root = 0;
- bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
- SKINNY_METADATA);
+ bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
+ trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
parent = ref->parent;
@@ -2353,7 +2340,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
}
if (node->ref_mod != 1) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
@@ -2361,18 +2348,18 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
- ret = alloc_reserved_tree_block(trans, root,
+ ret = alloc_reserved_tree_block(trans, fs_info,
parent, ref_root,
extent_op->flags_to_set,
&extent_op->key,
ref->level, &ins);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, root, node,
+ ret = __btrfs_inc_extent_ref(trans, fs_info, node,
parent, ref_root,
ref->level, 0, 1,
extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, root, node,
+ ret = __btrfs_free_extent(trans, fs_info, node,
parent, ref_root,
ref->level, 0, 1, extent_op);
} else {
@@ -2383,7 +2370,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2392,7 +2379,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
if (trans->aborted) {
if (insert_reserved)
- btrfs_pin_extent(root, node->bytenr,
+ btrfs_pin_extent(fs_info, node->bytenr,
node->num_bytes, 1);
return 0;
}
@@ -2407,33 +2394,31 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
*/
BUG_ON(extent_op);
head = btrfs_delayed_node_to_head(node);
- trace_run_delayed_ref_head(root->fs_info, node, head,
- node->action);
+ trace_run_delayed_ref_head(fs_info, node, head, node->action);
if (insert_reserved) {
- btrfs_pin_extent(root, node->bytenr,
+ btrfs_pin_extent(fs_info, node->bytenr,
node->num_bytes, 1);
if (head->is_data) {
- ret = btrfs_del_csums(trans, root,
+ ret = btrfs_del_csums(trans, fs_info,
node->bytenr,
node->num_bytes);
}
}
/* Also free its reserved qgroup space */
- btrfs_qgroup_free_delayed_ref(root->fs_info,
- head->qgroup_ref_root,
+ btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
head->qgroup_reserved);
return ret;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = run_delayed_tree_ref(trans, root, node, extent_op,
+ ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
- ret = run_delayed_data_ref(trans, root, node, extent_op,
+ ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
insert_reserved);
else
BUG();
@@ -2454,13 +2439,14 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
* the extent item from the extent tree, when there still are references
* to add, which would fail because they would not find the extent item.
*/
- list_for_each_entry(ref, &head->ref_list, list) {
- if (ref->action == BTRFS_ADD_DELAYED_REF)
- return ref;
- }
+ if (!list_empty(&head->ref_add_list))
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
- return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
- list);
+ ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+ list);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
}
/*
@@ -2468,14 +2454,13 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
*/
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
unsigned long nr)
{
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
- struct btrfs_fs_info *fs_info = root->fs_info;
ktime_t start = ktime_get();
int ret;
unsigned long count = 0;
@@ -2574,7 +2559,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (extent_op) {
spin_unlock(&locked_ref->lock);
- ret = run_delayed_extent_op(trans, root,
+ ret = run_delayed_extent_op(trans, fs_info,
ref, extent_op);
btrfs_free_delayed_extent_op(extent_op);
@@ -2620,6 +2605,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
actual_count++;
ref->in_tree = 0;
list_del(&ref->list);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
}
atomic_dec(&delayed_refs->num_entries);
@@ -2642,7 +2629,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
spin_unlock(&locked_ref->lock);
- ret = run_one_delayed_ref(trans, root, ref, extent_op,
+ ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
must_insert_reserved);
btrfs_free_delayed_extent_op(extent_op);
@@ -2743,43 +2730,43 @@ static u64 find_middle(struct rb_root *root)
}
#endif
-static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
+static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
{
u64 num_bytes;
num_bytes = heads * (sizeof(struct btrfs_extent_item) +
sizeof(struct btrfs_extent_inline_ref));
- if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+ if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
num_bytes += heads * sizeof(struct btrfs_tree_block_info);
/*
* We don't ever fill up leaves all the way so multiply by 2 just to be
* closer to what we're really going to want to use.
*/
- return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+ return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
}
/*
* Takes the number of bytes to be csumm'ed and figures out how many leaves it
* would require to store the csums for that many bytes.
*/
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
u64 csum_size;
u64 num_csums_per_leaf;
u64 num_csums;
- csum_size = BTRFS_MAX_ITEM_SIZE(root);
+ csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
num_csums_per_leaf = div64_u64(csum_size,
- (u64)btrfs_super_csum_size(root->fs_info->super_copy));
- num_csums = div64_u64(csum_bytes, root->sectorsize);
+ (u64)btrfs_super_csum_size(fs_info->super_copy));
+ num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
num_csums += num_csums_per_leaf - 1;
num_csums = div64_u64(num_csums, num_csums_per_leaf);
return num_csums;
}
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
@@ -2788,15 +2775,16 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
u64 num_bytes, num_dirty_bgs_bytes;
int ret = 0;
- num_bytes = btrfs_calc_trans_metadata_size(root, 1);
- num_heads = heads_to_leaves(root, num_heads);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+ num_heads = heads_to_leaves(fs_info, num_heads);
if (num_heads > 1)
- num_bytes += (num_heads - 1) * root->nodesize;
+ num_bytes += (num_heads - 1) * fs_info->nodesize;
num_bytes <<= 1;
- num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
- num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
+ num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
+ fs_info->nodesize;
+ num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
num_dirty_bgs);
- global_rsv = &root->fs_info->global_block_rsv;
+ global_rsv = &fs_info->global_block_rsv;
/*
* If we can't allocate any more chunks lets make sure we have _lots_ of
@@ -2815,9 +2803,8 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_entries =
atomic_read(&trans->transaction->delayed_refs.num_entries);
u64 avg_runtime;
@@ -2826,12 +2813,12 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
smp_mb();
avg_runtime = fs_info->avg_delayed_ref_runtime;
val = num_entries * avg_runtime;
- if (num_entries * avg_runtime >= NSEC_PER_SEC)
+ if (val >= NSEC_PER_SEC)
return 1;
if (val >= NSEC_PER_SEC / 2)
return 2;
- return btrfs_check_space_for_delayed_refs(trans, root);
+ return btrfs_check_space_for_delayed_refs(trans, fs_info);
}
struct async_delayed_refs {
@@ -2844,16 +2831,21 @@ struct async_delayed_refs {
struct btrfs_work work;
};
+static inline struct async_delayed_refs *
+to_async_delayed_refs(struct btrfs_work *work)
+{
+ return container_of(work, struct async_delayed_refs, work);
+}
+
static void delayed_ref_async_start(struct btrfs_work *work)
{
- struct async_delayed_refs *async;
+ struct async_delayed_refs *async = to_async_delayed_refs(work);
struct btrfs_trans_handle *trans;
+ struct btrfs_fs_info *fs_info = async->root->fs_info;
int ret;
- async = container_of(work, struct async_delayed_refs, work);
-
/* if the commit is already started, we don't need to wait here */
- if (btrfs_transaction_blocked(async->root->fs_info))
+ if (btrfs_transaction_blocked(fs_info))
goto done;
trans = btrfs_join_transaction(async->root);
@@ -2872,11 +2864,11 @@ static void delayed_ref_async_start(struct btrfs_work *work)
if (trans->transid > async->transid)
goto end;
- ret = btrfs_run_delayed_refs(trans, async->root, async->count);
+ ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
if (ret)
async->error = ret;
end:
- ret = btrfs_end_transaction(trans, async->root);
+ ret = btrfs_end_transaction(trans);
if (ret && !async->error)
async->error = ret;
done:
@@ -2886,7 +2878,7 @@ done:
kfree(async);
}
-int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait)
{
struct async_delayed_refs *async;
@@ -2896,7 +2888,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
if (!async)
return -ENOMEM;
- async->root = root->fs_info->tree_root;
+ async->root = fs_info->tree_root;
async->count = count;
async->error = 0;
async->transid = transid;
@@ -2909,7 +2901,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
btrfs_init_work(&async->work, btrfs_extent_refs_helper,
delayed_ref_async_start, NULL, NULL);
- btrfs_queue_work(root->fs_info->extent_workers, &async->work);
+ btrfs_queue_work(fs_info->extent_workers, &async->work);
if (wait) {
wait_for_completion(&async->wait);
@@ -2931,7 +2923,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
* Returns <0 on error and aborts the transaction
*/
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, unsigned long count)
+ struct btrfs_fs_info *fs_info, unsigned long count)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -2944,12 +2936,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (trans->aborted)
return 0;
- if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
+ if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
return 0;
- if (root == root->fs_info->extent_root)
- root = root->fs_info->tree_root;
-
delayed_refs = &trans->transaction->delayed_refs;
if (count == 0)
count = atomic_read(&delayed_refs->num_entries) * 2;
@@ -2959,7 +2948,7 @@ again:
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
trans->can_flush_pending_bgs = false;
- ret = __btrfs_run_delayed_refs(trans, root, count);
+ ret = __btrfs_run_delayed_refs(trans, fs_info, count);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
return ret;
@@ -2967,7 +2956,7 @@ again:
if (run_all) {
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, fs_info);
spin_lock(&delayed_refs->lock);
node = rb_first(&delayed_refs->href_root);
@@ -3012,7 +3001,7 @@ out:
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data)
{
@@ -3029,7 +3018,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
extent_op->is_data = is_data ? true : false;
extent_op->level = level;
- ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
num_bytes, extent_op);
if (ret)
btrfs_free_delayed_extent_op(extent_op);
@@ -3103,7 +3092,8 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 objectid, u64 offset, u64 bytenr)
{
- struct btrfs_root *extent_root = root->fs_info->extent_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
struct extent_buffer *leaf;
struct btrfs_extent_data_ref *ref;
struct btrfs_extent_inline_ref *iref;
@@ -3210,6 +3200,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
int full_backref, int inc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 num_bytes;
u64 parent;
@@ -3220,11 +3211,12 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
int i;
int level;
int ret = 0;
- int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
+ int (*process_func)(struct btrfs_trans_handle *,
+ struct btrfs_fs_info *,
u64, u64, u64, u64, u64, u64);
- if (btrfs_is_testing(root->fs_info))
+ if (btrfs_is_testing(fs_info))
return 0;
ref_root = btrfs_header_owner(buf);
@@ -3260,15 +3252,15 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
- ret = process_func(trans, root, bytenr, num_bytes,
+ ret = process_func(trans, fs_info, bytenr, num_bytes,
parent, ref_root, key.objectid,
key.offset);
if (ret)
goto fail;
} else {
bytenr = btrfs_node_blockptr(buf, i);
- num_bytes = root->nodesize;
- ret = process_func(trans, root, bytenr, num_bytes,
+ num_bytes = fs_info->nodesize;
+ ret = process_func(trans, fs_info, bytenr, num_bytes,
parent, ref_root, level - 1, 0);
if (ret)
goto fail;
@@ -3292,12 +3284,12 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_block_group_cache *cache)
{
int ret;
- struct btrfs_root *extent_root = root->fs_info->extent_root;
+ struct btrfs_root *extent_root = fs_info->extent_root;
unsigned long bi;
struct extent_buffer *leaf;
@@ -3319,22 +3311,20 @@ fail:
}
static struct btrfs_block_group_cache *
-next_block_group(struct btrfs_root *root,
+next_block_group(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
struct rb_node *node;
- spin_lock(&root->fs_info->block_group_cache_lock);
+ spin_lock(&fs_info->block_group_cache_lock);
/* If our block group was removed, we need a full search. */
if (RB_EMPTY_NODE(&cache->cache_node)) {
const u64 next_bytenr = cache->key.objectid + cache->key.offset;
- spin_unlock(&root->fs_info->block_group_cache_lock);
+ spin_unlock(&fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
- cache = btrfs_lookup_first_block_group(root->fs_info,
- next_bytenr);
- return cache;
+ cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+ return cache;
}
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
@@ -3344,7 +3334,7 @@ next_block_group(struct btrfs_root *root,
btrfs_get_block_group(cache);
} else
cache = NULL;
- spin_unlock(&root->fs_info->block_group_cache_lock);
+ spin_unlock(&fs_info->block_group_cache_lock);
return cache;
}
@@ -3352,7 +3342,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
struct btrfs_trans_handle *trans,
struct btrfs_path *path)
{
- struct btrfs_root *root = block_group->fs_info->tree_root;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
struct inode *inode = NULL;
u64 alloc_hint = 0;
int dcs = BTRFS_DC_ERROR;
@@ -3425,8 +3416,8 @@ again:
WARN_ON(ret);
if (i_size_read(inode) > 0) {
- ret = btrfs_check_trunc_cache_free_space(root,
- &root->fs_info->global_block_rsv);
+ ret = btrfs_check_trunc_cache_free_space(fs_info,
+ &fs_info->global_block_rsv);
if (ret)
goto out_put;
@@ -3437,7 +3428,7 @@ again:
spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED ||
- !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
+ !btrfs_test_opt(fs_info, SPACE_CACHE)) {
/*
* don't bother trying to write stuff out _if_
* a) we're not cached,
@@ -3506,14 +3497,14 @@ out:
}
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *cache, *tmp;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_path *path;
if (list_empty(&cur_trans->dirty_bgs) ||
- !btrfs_test_opt(root->fs_info, SPACE_CACHE))
+ !btrfs_test_opt(fs_info, SPACE_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -3544,7 +3535,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
* we're still allowing others to join the commit.
*/
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
@@ -3569,7 +3560,7 @@ again:
* make sure all the block groups on our dirty list actually
* exist
*/
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, fs_info);
if (!path) {
path = btrfs_alloc_path();
@@ -3594,9 +3585,7 @@ again:
*/
if (!list_empty(&cache->io_list)) {
list_del_init(&cache->io_list);
- btrfs_wait_cache_io(root, trans, cache,
- &cache->io_ctl, path,
- cache->key.objectid);
+ btrfs_wait_cache_io(trans, cache, path);
btrfs_put_block_group(cache);
}
@@ -3619,7 +3608,8 @@ again:
if (cache->disk_cache_state == BTRFS_DC_SETUP) {
cache->io_ctl.inode = NULL;
- ret = btrfs_write_out_cache(root, trans, cache, path);
+ ret = btrfs_write_out_cache(fs_info, trans,
+ cache, path);
if (ret == 0 && cache->io_ctl.inode) {
num_started++;
should_put = 0;
@@ -3638,7 +3628,8 @@ again:
}
}
if (!ret) {
- ret = write_one_cache_group(trans, root, path, cache);
+ ret = write_one_cache_group(trans, fs_info,
+ path, cache);
/*
* Our block group might still be attached to the list
* of new block groups in the transaction handle of some
@@ -3683,7 +3674,7 @@ again:
* go through delayed refs for all the stuff we've just kicked off
* and then loop back (just once)
*/
- ret = btrfs_run_delayed_refs(trans, root, 0);
+ ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (!ret && loops == 0) {
loops++;
spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3698,7 +3689,7 @@ again:
}
spin_unlock(&cur_trans->dirty_bgs_lock);
} else if (ret < 0) {
- btrfs_cleanup_dirty_bgs(cur_trans, root);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
}
btrfs_free_path(path);
@@ -3706,7 +3697,7 @@ again:
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
@@ -3749,9 +3740,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
if (!list_empty(&cache->io_list)) {
spin_unlock(&cur_trans->dirty_bgs_lock);
list_del_init(&cache->io_list);
- btrfs_wait_cache_io(root, trans, cache,
- &cache->io_ctl, path,
- cache->key.objectid);
+ btrfs_wait_cache_io(trans, cache, path);
btrfs_put_block_group(cache);
spin_lock(&cur_trans->dirty_bgs_lock);
}
@@ -3767,11 +3756,13 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
cache_save_setup(cache, trans, path);
if (!ret)
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
+ ret = btrfs_run_delayed_refs(trans, fs_info,
+ (unsigned long) -1);
if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
cache->io_ctl.inode = NULL;
- ret = btrfs_write_out_cache(root, trans, cache, path);
+ ret = btrfs_write_out_cache(fs_info, trans,
+ cache, path);
if (ret == 0 && cache->io_ctl.inode) {
num_started++;
should_put = 0;
@@ -3785,7 +3776,8 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
}
}
if (!ret) {
- ret = write_one_cache_group(trans, root, path, cache);
+ ret = write_one_cache_group(trans, fs_info,
+ path, cache);
/*
* One of the free space endio workers might have
* created a new block group while updating a free space
@@ -3802,8 +3794,8 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
if (ret == -ENOENT) {
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
- ret = write_one_cache_group(trans, root, path,
- cache);
+ ret = write_one_cache_group(trans, fs_info,
+ path, cache);
}
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -3820,8 +3812,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
cache = list_first_entry(io, struct btrfs_block_group_cache,
io_list);
list_del_init(&cache->io_list);
- btrfs_wait_cache_io(root, trans, cache,
- &cache->io_ctl, path, cache->key.objectid);
+ btrfs_wait_cache_io(trans, cache, path);
btrfs_put_block_group(cache);
}
@@ -3829,12 +3820,12 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
return ret;
}
-int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
+int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group_cache *block_group;
int readonly = 0;
- block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+ block_group = btrfs_lookup_block_group(fs_info, bytenr);
if (!block_group || block_group->ro)
readonly = 1;
if (block_group)
@@ -4043,9 +4034,9 @@ static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
* progress (either running or paused) picks the target profile (if it's
* already available), otherwise falls back to plain reducing.
*/
-static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
- u64 num_devices = root->fs_info->fs_devices->rw_devices;
+ u64 num_devices = fs_info->fs_devices->rw_devices;
u64 target;
u64 raid_type;
u64 allowed = 0;
@@ -4054,16 +4045,16 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
* see if restripe for this chunk_type is in progress, if so
* try to reduce to the target profile
*/
- spin_lock(&root->fs_info->balance_lock);
- target = get_restripe_target(root->fs_info, flags);
+ spin_lock(&fs_info->balance_lock);
+ target = get_restripe_target(fs_info, flags);
if (target) {
/* pick target profile only if it's already available */
if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
- spin_unlock(&root->fs_info->balance_lock);
+ spin_unlock(&fs_info->balance_lock);
return extended_to_chunk(target);
}
}
- spin_unlock(&root->fs_info->balance_lock);
+ spin_unlock(&fs_info->balance_lock);
/* First, mask out the RAID levels which aren't possible */
for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
@@ -4088,39 +4079,40 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
return extended_to_chunk(flags | allowed);
}
-static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
+static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
unsigned seq;
u64 flags;
do {
flags = orig_flags;
- seq = read_seqbegin(&root->fs_info->profiles_lock);
+ seq = read_seqbegin(&fs_info->profiles_lock);
if (flags & BTRFS_BLOCK_GROUP_DATA)
- flags |= root->fs_info->avail_data_alloc_bits;
+ flags |= fs_info->avail_data_alloc_bits;
else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
- flags |= root->fs_info->avail_system_alloc_bits;
+ flags |= fs_info->avail_system_alloc_bits;
else if (flags & BTRFS_BLOCK_GROUP_METADATA)
- flags |= root->fs_info->avail_metadata_alloc_bits;
- } while (read_seqretry(&root->fs_info->profiles_lock, seq));
+ flags |= fs_info->avail_metadata_alloc_bits;
+ } while (read_seqretry(&fs_info->profiles_lock, seq));
- return btrfs_reduce_alloc_profile(root, flags);
+ return btrfs_reduce_alloc_profile(fs_info, flags);
}
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 flags;
u64 ret;
if (data)
flags = BTRFS_BLOCK_GROUP_DATA;
- else if (root == root->fs_info->chunk_root)
+ else if (root == fs_info->chunk_root)
flags = BTRFS_BLOCK_GROUP_SYSTEM;
else
flags = BTRFS_BLOCK_GROUP_METADATA;
- ret = get_alloc_profile(root, flags);
+ ret = get_alloc_profile(fs_info, flags);
return ret;
}
@@ -4135,7 +4127,7 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
int have_pinned_space;
/* make sure bytes are sectorsize aligned */
- bytes = ALIGN(bytes, root->sectorsize);
+ bytes = ALIGN(bytes, fs_info->sectorsize);
if (btrfs_is_free_space_inode(inode)) {
need_commit = 0;
@@ -4181,10 +4173,9 @@ alloc:
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- alloc_target,
+ ret = do_chunk_alloc(trans, fs_info, alloc_target,
CHUNK_ALLOC_NO_FORCE);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret < 0) {
if (ret != -ENOSPC)
return ret;
@@ -4213,12 +4204,13 @@ alloc:
/* commit the current transaction and try again */
commit_trans:
if (need_commit &&
- !atomic_read(&root->fs_info->open_ioctl_trans)) {
+ !atomic_read(&fs_info->open_ioctl_trans)) {
need_commit--;
if (need_commit > 0) {
btrfs_start_delalloc_roots(fs_info, 0, -1);
- btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, -1, 0,
+ (u64)-1);
}
trans = btrfs_join_transaction(root);
@@ -4228,7 +4220,7 @@ commit_trans:
test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
&trans->transaction->flags) ||
need_commit > 0) {
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
/*
@@ -4236,21 +4228,21 @@ commit_trans:
* operations. Wait for it to finish so that
* more space is released.
*/
- mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
- mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+ mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+ mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
goto again;
} else {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
}
}
- trace_btrfs_space_reservation(root->fs_info,
+ trace_btrfs_space_reservation(fs_info,
"space_info:enospc",
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
data_sinfo->bytes_may_use += bytes;
- trace_btrfs_space_reservation(root->fs_info, "space_info",
+ trace_btrfs_space_reservation(fs_info, "space_info",
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
@@ -4264,13 +4256,13 @@ commit_trans:
*/
int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
/* align the range */
- len = round_up(start + len, root->sectorsize) -
- round_down(start, root->sectorsize);
- start = round_down(start, root->sectorsize);
+ len = round_up(start + len, fs_info->sectorsize) -
+ round_down(start, fs_info->sectorsize);
+ start = round_down(start, fs_info->sectorsize);
ret = btrfs_alloc_data_chunk_ondemand(inode, len);
if (ret < 0)
@@ -4294,21 +4286,21 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
u64 len)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_space_info *data_sinfo;
/* Make sure the range is aligned to sectorsize */
- len = round_up(start + len, root->sectorsize) -
- round_down(start, root->sectorsize);
- start = round_down(start, root->sectorsize);
+ len = round_up(start + len, fs_info->sectorsize) -
+ round_down(start, fs_info->sectorsize);
+ start = round_down(start, fs_info->sectorsize);
- data_sinfo = root->fs_info->data_sinfo;
+ data_sinfo = fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
if (WARN_ON(data_sinfo->bytes_may_use < len))
data_sinfo->bytes_may_use = 0;
else
data_sinfo->bytes_may_use -= len;
- trace_btrfs_space_reservation(root->fs_info, "space_info",
+ trace_btrfs_space_reservation(fs_info, "space_info",
data_sinfo->flags, len, 0);
spin_unlock(&data_sinfo->lock);
}
@@ -4322,6 +4314,13 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
*/
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ /* Make sure the range is aligned to sectorsize */
+ len = round_up(start + len, root->fs_info->sectorsize) -
+ round_down(start, root->fs_info->sectorsize);
+ start = round_down(start, root->fs_info->sectorsize);
+
btrfs_free_reserved_data_space_noquota(inode, start, len);
btrfs_qgroup_free_data(inode, start, len);
}
@@ -4344,10 +4343,10 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
return (global->size << 1);
}
-static int should_alloc_chunk(struct btrfs_root *root,
+static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *sinfo, int force)
{
- struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
u64 thresh;
@@ -4368,7 +4367,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
* about 1% of the FS size.
*/
if (force == CHUNK_ALLOC_LIMITED) {
- thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
+ thresh = btrfs_super_total_bytes(fs_info->super_copy);
thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
if (num_bytes - num_allocated < thresh)
@@ -4380,7 +4379,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
return 1;
}
-static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
+static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
u64 num_dev;
@@ -4388,7 +4387,7 @@ static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6))
- num_dev = root->fs_info->fs_devices->rw_devices;
+ num_dev = fs_info->fs_devices->rw_devices;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_dev = 2;
else
@@ -4403,8 +4402,7 @@ static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
* removing a chunk.
*/
void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 type)
+ struct btrfs_fs_info *fs_info, u64 type)
{
struct btrfs_space_info *info;
u64 left;
@@ -4416,43 +4414,43 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
* Needed because we can end up allocating a system chunk and for an
* atomic and race free space reservation in the chunk block reserve.
*/
- ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
+ ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
- info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+ info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
spin_lock(&info->lock);
left = info->total_bytes - info->bytes_used - info->bytes_pinned -
info->bytes_reserved - info->bytes_readonly -
info->bytes_may_use;
spin_unlock(&info->lock);
- num_devs = get_profile_num_devs(root, type);
+ num_devs = get_profile_num_devs(fs_info, type);
/* num_devs device items to update and 1 chunk item to add or remove */
- thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
- btrfs_calc_trans_metadata_size(root, 1);
+ thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
+ btrfs_calc_trans_metadata_size(fs_info, 1);
- if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
- btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
- left, thresh, type);
- dump_space_info(root->fs_info, info, 0, 0);
+ if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+ btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
+ left, thresh, type);
+ dump_space_info(fs_info, info, 0, 0);
}
if (left < thresh) {
u64 flags;
- flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
+ flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
/*
* Ignore failure to create system chunk. We might end up not
* needing it, as we might not need to COW all nodes/leafs from
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
*/
- ret = btrfs_alloc_chunk(trans, root, flags);
+ ret = btrfs_alloc_chunk(trans, fs_info, flags);
}
if (!ret) {
- ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
- &root->fs_info->chunk_block_rsv,
+ ret = btrfs_block_rsv_add(fs_info->chunk_root,
+ &fs_info->chunk_block_rsv,
thresh, BTRFS_RESERVE_NO_FLUSH);
if (!ret)
trans->chunk_bytes_reserved += thresh;
@@ -4469,10 +4467,9 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
* - return errors including -ENOSPC otherwise.
*/
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 flags, int force)
+ struct btrfs_fs_info *fs_info, u64 flags, int force)
{
struct btrfs_space_info *space_info;
- struct btrfs_fs_info *fs_info = extent_root->fs_info;
int wait_for_alloc = 0;
int ret = 0;
@@ -4480,10 +4477,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
if (trans->allocating_chunk)
return -ENOSPC;
- space_info = __find_space_info(extent_root->fs_info, flags);
+ space_info = __find_space_info(fs_info, flags);
if (!space_info) {
- ret = update_space_info(extent_root->fs_info, flags,
- 0, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
BUG_ON(ret); /* -ENOMEM */
}
BUG_ON(!space_info); /* Logic error */
@@ -4493,7 +4489,7 @@ again:
if (force < space_info->force_alloc)
force = space_info->force_alloc;
if (space_info->full) {
- if (should_alloc_chunk(extent_root, space_info, force))
+ if (should_alloc_chunk(fs_info, space_info, force))
ret = -ENOSPC;
else
ret = 0;
@@ -4501,7 +4497,7 @@ again:
return ret;
}
- if (!should_alloc_chunk(extent_root, space_info, force)) {
+ if (!should_alloc_chunk(fs_info, space_info, force)) {
spin_unlock(&space_info->lock);
return 0;
} else if (space_info->chunk_alloc) {
@@ -4551,9 +4547,9 @@ again:
* Check if we have enough space in SYSTEM chunk because we may need
* to update devices.
*/
- check_system_chunk(trans, extent_root, flags);
+ check_system_chunk(trans, fs_info, flags);
- ret = btrfs_alloc_chunk(trans, extent_root, flags);
+ ret = btrfs_alloc_chunk(trans, fs_info, flags);
trans->allocating_chunk = false;
spin_lock(&space_info->lock);
@@ -4585,7 +4581,7 @@ out:
*/
if (trans->can_flush_pending_bgs &&
trans->chunk_bytes_reserved >= (u64)SZ_2M) {
- btrfs_create_pending_block_groups(trans, extent_root);
+ btrfs_create_pending_block_groups(trans, fs_info);
btrfs_trans_release_chunk_metadata(trans);
}
return ret;
@@ -4595,7 +4591,8 @@ static int can_overcommit(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_block_rsv *global_rsv;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 profile;
u64 space_size;
u64 avail;
@@ -4605,8 +4602,6 @@ static int can_overcommit(struct btrfs_root *root,
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
return 0;
- BUG_ON(root->fs_info == NULL);
- global_rsv = &root->fs_info->global_block_rsv;
profile = btrfs_get_alloc_profile(root, 0);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly;
@@ -4625,9 +4620,9 @@ static int can_overcommit(struct btrfs_root *root,
used += space_info->bytes_may_use;
- spin_lock(&root->fs_info->free_chunk_lock);
- avail = root->fs_info->free_chunk_space;
- spin_unlock(&root->fs_info->free_chunk_lock);
+ spin_lock(&fs_info->free_chunk_lock);
+ avail = fs_info->free_chunk_space;
+ spin_unlock(&fs_info->free_chunk_lock);
/*
* If we have dup, raid1 or raid10 then only half of the free
@@ -4655,10 +4650,10 @@ static int can_overcommit(struct btrfs_root *root,
return 0;
}
-static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
unsigned long nr_pages, int nr_items)
{
- struct super_block *sb = root->fs_info->sb;
+ struct super_block *sb = fs_info->sb;
if (down_read_trylock(&sb->s_umount)) {
writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
@@ -4671,19 +4666,19 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
* the filesystem is readonly(all dirty pages are written to
* the disk).
*/
- btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
+ btrfs_start_delalloc_roots(fs_info, 0, nr_items);
if (!current->journal_info)
- btrfs_wait_ordered_roots(root->fs_info, nr_items,
- 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
}
}
-static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
+static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
+ u64 to_reclaim)
{
u64 bytes;
int nr;
- bytes = btrfs_calc_trans_metadata_size(root, 1);
+ bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
nr = (int)div64_u64(to_reclaim, bytes);
if (!nr)
nr = 1;
@@ -4698,6 +4693,7 @@ static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
bool wait_ordered)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *block_rsv;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
@@ -4710,21 +4706,20 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
enum btrfs_reserve_flush_enum flush;
/* Calc the number of the pages we need flush for space reservation */
- items = calc_reclaim_items_nr(root, to_reclaim);
+ items = calc_reclaim_items_nr(fs_info, to_reclaim);
to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
trans = (struct btrfs_trans_handle *)current->journal_info;
- block_rsv = &root->fs_info->delalloc_block_rsv;
+ block_rsv = &fs_info->delalloc_block_rsv;
space_info = block_rsv->space_info;
delalloc_bytes = percpu_counter_sum_positive(
- &root->fs_info->delalloc_bytes);
+ &fs_info->delalloc_bytes);
if (delalloc_bytes == 0) {
if (trans)
return;
if (wait_ordered)
- btrfs_wait_ordered_roots(root->fs_info, items,
- 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
return;
}
@@ -4732,12 +4727,12 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
nr_pages = max_reclaim >> PAGE_SHIFT;
- btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
+ btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
/*
* We need to wait for the async pages to actually start before
* we do anything.
*/
- max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
+ max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
if (!max_reclaim)
goto skip_async;
@@ -4746,8 +4741,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
else
max_reclaim -= nr_pages;
- wait_event(root->fs_info->async_submit_wait,
- atomic_read(&root->fs_info->async_delalloc_pages) <=
+ wait_event(fs_info->async_submit_wait,
+ atomic_read(&fs_info->async_delalloc_pages) <=
(int)max_reclaim);
skip_async:
if (!trans)
@@ -4768,15 +4763,14 @@ skip_async:
loops++;
if (wait_ordered && !trans) {
- btrfs_wait_ordered_roots(root->fs_info, items,
- 0, (u64)-1);
+ btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
} else {
time_left = schedule_timeout_killable(1);
if (time_left)
break;
}
delalloc_bytes = percpu_counter_sum_positive(
- &root->fs_info->delalloc_bytes);
+ &fs_info->delalloc_bytes);
}
}
@@ -4794,7 +4788,8 @@ static int may_commit_transaction(struct btrfs_root *root,
struct btrfs_space_info *space_info,
u64 bytes, int force)
{
- struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
struct btrfs_trans_handle *trans;
trans = (struct btrfs_trans_handle *)current->journal_info;
@@ -4829,7 +4824,7 @@ commit:
if (IS_ERR(trans))
return -ENOSPC;
- return btrfs_commit_transaction(trans, root);
+ return btrfs_commit_transaction(trans);
}
struct reserve_ticket {
@@ -4843,6 +4838,7 @@ static int flush_space(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 num_bytes,
u64 orig_bytes, int state)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
int nr;
int ret = 0;
@@ -4851,7 +4847,7 @@ static int flush_space(struct btrfs_root *root,
case FLUSH_DELAYED_ITEMS_NR:
case FLUSH_DELAYED_ITEMS:
if (state == FLUSH_DELAYED_ITEMS_NR)
- nr = calc_reclaim_items_nr(root, num_bytes) * 2;
+ nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
else
nr = -1;
@@ -4860,8 +4856,8 @@ static int flush_space(struct btrfs_root *root,
ret = PTR_ERR(trans);
break;
}
- ret = btrfs_run_delayed_items_nr(trans, root, nr);
- btrfs_end_transaction(trans, root);
+ ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
+ btrfs_end_transaction(trans);
break;
case FLUSH_DELALLOC:
case FLUSH_DELALLOC_WAIT:
@@ -4874,10 +4870,10 @@ static int flush_space(struct btrfs_root *root,
ret = PTR_ERR(trans);
break;
}
- ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+ ret = do_chunk_alloc(trans, fs_info,
btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
@@ -4889,7 +4885,7 @@ static int flush_space(struct btrfs_root *root,
break;
}
- trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
+ trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes,
orig_bytes, state, ret);
return ret;
}
@@ -4935,6 +4931,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
struct btrfs_root *root, u64 used)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
/* If we're just plain full then async reclaim just slows us down. */
@@ -4944,9 +4941,8 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
if (!btrfs_calc_reclaim_metadata_size(root, space_info))
return 0;
- return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
- !test_bit(BTRFS_FS_STATE_REMOUNTING,
- &root->fs_info->fs_state));
+ return (used >= thresh && !btrfs_fs_closing(fs_info) &&
+ !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
static void wake_all_tickets(struct list_head *head)
@@ -5126,6 +5122,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct reserve_ticket ticket;
u64 used;
int ret = 0;
@@ -5146,15 +5143,13 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
*/
if (used + orig_bytes <= space_info->total_bytes) {
space_info->bytes_may_use += orig_bytes;
- trace_btrfs_space_reservation(root->fs_info, "space_info",
- space_info->flags, orig_bytes,
- 1);
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags, orig_bytes, 1);
ret = 0;
} else if (can_overcommit(root, space_info, orig_bytes, flush)) {
space_info->bytes_may_use += orig_bytes;
- trace_btrfs_space_reservation(root->fs_info, "space_info",
- space_info->flags, orig_bytes,
- 1);
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags, orig_bytes, 1);
ret = 0;
}
@@ -5173,7 +5168,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
list_add_tail(&ticket.list, &space_info->tickets);
if (!space_info->flush) {
space_info->flush = 1;
- trace_btrfs_trigger_flush(root->fs_info,
+ trace_btrfs_trigger_flush(fs_info,
space_info->flags,
orig_bytes, flush,
"enospc");
@@ -5191,15 +5186,13 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
* which means we won't have fs_info->fs_root set, so don't do
* the async reclaim as we will panic.
*/
- if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
+ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
need_do_async_reclaim(space_info, root, used) &&
- !work_busy(&root->fs_info->async_reclaim_work)) {
- trace_btrfs_trigger_flush(root->fs_info,
- space_info->flags,
- orig_bytes, flush,
- "preempt");
+ !work_busy(&fs_info->async_reclaim_work)) {
+ trace_btrfs_trigger_flush(fs_info, space_info->flags,
+ orig_bytes, flush, "preempt");
queue_work(system_unbound_wq,
- &root->fs_info->async_reclaim_work);
+ &fs_info->async_reclaim_work);
}
}
spin_unlock(&space_info->lock);
@@ -5207,19 +5200,19 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
return ret;
if (flush == BTRFS_RESERVE_FLUSH_ALL)
- return wait_reserve_ticket(root->fs_info, space_info, &ticket,
+ return wait_reserve_ticket(fs_info, space_info, &ticket,
orig_bytes);
ret = 0;
- priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
+ priority_reclaim_metadata_space(fs_info, space_info, &ticket);
spin_lock(&space_info->lock);
if (ticket.bytes) {
if (ticket.bytes < orig_bytes) {
u64 num_bytes = orig_bytes - ticket.bytes;
space_info->bytes_may_use -= num_bytes;
- trace_btrfs_space_reservation(root->fs_info,
- "space_info", space_info->flags,
- num_bytes, 0);
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags,
+ num_bytes, 0);
}
list_del_init(&ticket.list);
@@ -5249,22 +5242,20 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
int ret;
ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
flush);
if (ret == -ENOSPC &&
unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
- struct btrfs_block_rsv *global_rsv =
- &root->fs_info->global_block_rsv;
-
if (block_rsv != global_rsv &&
!block_rsv_use_bytes(global_rsv, orig_bytes))
ret = 0;
}
if (ret == -ENOSPC)
- trace_btrfs_space_reservation(root->fs_info,
- "space_info:enospc",
+ trace_btrfs_space_reservation(fs_info, "space_info:enospc",
block_rsv->space_info->flags,
orig_bytes, 1);
return ret;
@@ -5274,18 +5265,19 @@ static struct btrfs_block_rsv *get_block_rsv(
const struct btrfs_trans_handle *trans,
const struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *block_rsv = NULL;
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- (root == root->fs_info->csum_root && trans->adding_csums) ||
- (root == root->fs_info->uuid_root))
+ (root == fs_info->csum_root && trans->adding_csums) ||
+ (root == fs_info->uuid_root))
block_rsv = trans->block_rsv;
if (!block_rsv)
block_rsv = root->block_rsv;
if (!block_rsv)
- block_rsv = &root->fs_info->empty_block_rsv;
+ block_rsv = &fs_info->empty_block_rsv;
return block_rsv;
}
@@ -5507,11 +5499,10 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
rsv->type = type;
}
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type)
{
struct btrfs_block_rsv *block_rsv;
- struct btrfs_fs_info *fs_info = root->fs_info;
block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
if (!block_rsv)
@@ -5523,12 +5514,12 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
return block_rsv;
}
-void btrfs_free_block_rsv(struct btrfs_root *root,
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
if (!rsv)
return;
- btrfs_block_rsv_release(root, rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
kfree(rsv);
}
@@ -5555,8 +5546,7 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_check(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv, int min_factor)
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
@@ -5603,16 +5593,16 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
-void btrfs_block_rsv_release(struct btrfs_root *root,
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes)
{
- struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+
if (global_rsv == block_rsv ||
block_rsv->space_info != global_rsv->space_info)
global_rsv = NULL;
- block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
- num_bytes);
+ block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
}
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -5707,7 +5697,7 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
}
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
if (!trans->block_rsv)
return;
@@ -5715,9 +5705,10 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
if (!trans->bytes_reserved)
return;
- trace_btrfs_space_reservation(root->fs_info, "transaction",
+ trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, trans->bytes_reserved, 0);
- btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv,
+ trans->bytes_reserved);
trans->bytes_reserved = 0;
}
@@ -5743,6 +5734,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
/*
* We always use trans->block_rsv here as we will have reserved space
@@ -5758,19 +5750,22 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
* added it; this takes over the reservation so we can release it later
* when we are truly done with the orphan item.
*/
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
- trace_btrfs_space_reservation(root->fs_info, "orphan",
+ u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+ trace_btrfs_space_reservation(fs_info, "orphan",
btrfs_ino(inode), num_bytes, 1);
return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
- trace_btrfs_space_reservation(root->fs_info, "orphan",
+ u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+ trace_btrfs_space_reservation(fs_info, "orphan",
btrfs_ino(inode), num_bytes, 0);
- btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
}
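The comment above describes a hand-off: space already reserved in the transaction rsv is migrated into the orphan rsv, then given back only when the orphan item is finally deleted. A minimal sketch of that migrate-then-release pattern, using toy stand-in types rather than the kernel's struct btrfs_block_rsv (illustrative only):

/* Sketch only: toy stand-ins for the kernel's block reserves. */
struct toy_rsv {
	unsigned long long reserved;
};

/* Move bytes src already holds into dst (cf. the reserve path above). */
static int toy_rsv_migrate(struct toy_rsv *src, struct toy_rsv *dst,
			   unsigned long long bytes)
{
	if (src->reserved < bytes)
		return -1;		/* nothing reserved to hand over */
	src->reserved -= bytes;
	dst->reserved += bytes;
	return 0;
}

/* Give the bytes back once the orphan item is gone (cf. the release path). */
static void toy_rsv_release(struct toy_rsv *rsv, unsigned long long bytes)
{
	rsv->reserved -= bytes;
}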
/*
@@ -5795,11 +5790,12 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
{
u64 num_bytes;
int ret;
- struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
/* One for parent inode, two for dir entries */
- num_bytes = 3 * root->nodesize;
+ num_bytes = 3 * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta(root, num_bytes);
if (ret)
return ret;
@@ -5809,8 +5805,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
*qgroup_reserved = num_bytes;
- num_bytes = btrfs_calc_trans_metadata_size(root, items);
- rsv->space_info = __find_space_info(root->fs_info,
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
+ rsv->space_info = __find_space_info(fs_info,
BTRFS_BLOCK_GROUP_METADATA);
ret = btrfs_block_rsv_add(root, rsv, num_bytes,
BTRFS_RESERVE_FLUSH_ALL);
@@ -5824,11 +5820,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
return ret;
}
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
u64 qgroup_reserved)
{
- btrfs_block_rsv_release(root, rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
/**
@@ -5894,35 +5890,38 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
int reserve)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 old_csums, num_csums;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
BTRFS_I(inode)->csum_bytes == 0)
return 0;
- old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
+ old_csums = btrfs_csum_bytes_to_leaves(fs_info,
+ BTRFS_I(inode)->csum_bytes);
if (reserve)
BTRFS_I(inode)->csum_bytes += num_bytes;
else
BTRFS_I(inode)->csum_bytes -= num_bytes;
- num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
+ num_csums = btrfs_csum_bytes_to_leaves(fs_info,
+ BTRFS_I(inode)->csum_bytes);
/* No change, no need to reserve more */
if (old_csums == num_csums)
return 0;
if (reserve)
- return btrfs_calc_trans_metadata_size(root,
+ return btrfs_calc_trans_metadata_size(fs_info,
num_csums - old_csums);
- return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
+ return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
}
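calc_csum_metadata_size() reserves for the change in checksum-leaf count, not for the raw byte delta: if the new csum bytes still fit in the leaves already accounted for, nothing extra is reserved. A standalone sketch of the growth direction of that computation; the per-leaf capacity and per-leaf cost below are illustrative assumptions, not btrfs's real geometry:

#include <stdio.h>

/* Assume one leaf covers checksums for 4096 bytes of csum data. */
static unsigned long csum_bytes_to_leaves(unsigned long csum_bytes)
{
	const unsigned long per_leaf = 4096;	/* assumed capacity */

	return (csum_bytes + per_leaf - 1) / per_leaf;
}

/* Reserve only for the *change* in leaf count, as the kernel code does. */
static unsigned long calc_csum_delta(unsigned long cur_csum_bytes,
				     unsigned long new_csum_bytes)
{
	unsigned long old_leaves = csum_bytes_to_leaves(cur_csum_bytes);
	unsigned long new_leaves = csum_bytes_to_leaves(cur_csum_bytes +
							new_csum_bytes);
	const unsigned long cost_per_leaf = 16384;	/* assumed cost */

	if (old_leaves == new_leaves)
		return 0;	/* still fits: no extra reservation */
	return (new_leaves - old_leaves) * cost_per_leaf;
}

int main(void)
{
	printf("%lu\n", calc_csum_delta(1000, 100));	/* 0: same leaf */
	printf("%lu\n", calc_csum_delta(1000, 65536));	/* extra leaves */
	return 0;
}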
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
+ struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
u64 to_reserve = 0;
u64 csum_bytes;
unsigned nr_extents = 0;
@@ -5949,13 +5948,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
}
if (flush != BTRFS_RESERVE_NO_FLUSH &&
- btrfs_transaction_in_commit(root->fs_info))
+ btrfs_transaction_in_commit(fs_info))
schedule_timeout(1);
if (delalloc_lock)
mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
- num_bytes = ALIGN(num_bytes, root->sectorsize);
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&BTRFS_I(inode)->lock);
nr_extents = (unsigned)div64_u64(num_bytes +
@@ -5970,28 +5969,29 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_I(inode)->reserved_extents;
/* We always want to reserve a slot for updating the inode. */
- to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
+ to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
ret = btrfs_qgroup_reserve_meta(root,
- nr_extents * root->nodesize);
+ nr_extents * fs_info->nodesize);
if (ret)
goto out_fail;
}
ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
if (unlikely(ret)) {
- btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
+ btrfs_qgroup_free_meta(root,
+ nr_extents * fs_info->nodesize);
goto out_fail;
}
spin_lock(&BTRFS_I(inode)->lock);
if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
&BTRFS_I(inode)->runtime_flags)) {
- to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
+ to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
release_extra = true;
}
BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -6001,12 +6001,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
if (to_reserve)
- trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), to_reserve, 1);
if (release_extra)
- btrfs_block_rsv_release(root, block_rsv,
- btrfs_calc_trans_metadata_size(root,
- 1));
+ btrfs_block_rsv_release(fs_info, block_rsv,
+ btrfs_calc_trans_metadata_size(fs_info, 1));
return 0;
out_fail:
@@ -6061,11 +6060,11 @@ out_fail:
}
spin_unlock(&BTRFS_I(inode)->lock);
if (dropped)
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
+ to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
if (to_free) {
- btrfs_block_rsv_release(root, block_rsv, to_free);
- trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ btrfs_block_rsv_release(fs_info, block_rsv, to_free);
+ trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
}
if (delalloc_lock)
@@ -6084,11 +6083,11 @@ out_fail:
*/
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 to_free = 0;
unsigned dropped;
- num_bytes = ALIGN(num_bytes, root->sectorsize);
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode, num_bytes);
@@ -6096,16 +6095,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
if (dropped > 0)
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
+ to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
- if (btrfs_is_testing(root->fs_info))
+ if (btrfs_is_testing(fs_info))
return;
- trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
- btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
- to_free);
+ btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
}
/**
@@ -6166,11 +6164,10 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
}
static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr,
+ struct btrfs_fs_info *info, u64 bytenr,
u64 num_bytes, int alloc)
{
struct btrfs_block_group_cache *cache = NULL;
- struct btrfs_fs_info *info = root->fs_info;
u64 total = num_bytes;
u64 old_val;
u64 byte_in_group;
@@ -6211,7 +6208,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
- if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
+ if (btrfs_test_opt(info, SPACE_CACHE) &&
cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR;
@@ -6236,7 +6233,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- trace_btrfs_space_reservation(root->fs_info, "pinned",
+ trace_btrfs_space_reservation(info, "pinned",
cache->space_info->flags,
num_bytes, 1);
set_extent_dirty(info->pinned_extents,
@@ -6276,19 +6273,19 @@ static int update_block_group(struct btrfs_trans_handle *trans,
return 0;
}
-static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
+static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
struct btrfs_block_group_cache *cache;
u64 bytenr;
- spin_lock(&root->fs_info->block_group_cache_lock);
- bytenr = root->fs_info->first_logical_byte;
- spin_unlock(&root->fs_info->block_group_cache_lock);
+ spin_lock(&fs_info->block_group_cache_lock);
+ bytenr = fs_info->first_logical_byte;
+ spin_unlock(&fs_info->block_group_cache_lock);
if (bytenr < (u64)-1)
return bytenr;
- cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
+ cache = btrfs_lookup_first_block_group(fs_info, search_start);
if (!cache)
return 0;
@@ -6298,7 +6295,7 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
return bytenr;
}
-static int pin_down_extent(struct btrfs_root *root,
+static int pin_down_extent(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
u64 bytenr, u64 num_bytes, int reserved)
{
@@ -6313,9 +6310,9 @@ static int pin_down_extent(struct btrfs_root *root,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- trace_btrfs_space_reservation(root->fs_info, "pinned",
+ trace_btrfs_space_reservation(fs_info, "pinned",
cache->space_info->flags, num_bytes, 1);
- set_extent_dirty(root->fs_info->pinned_extents, bytenr,
+ set_extent_dirty(fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
return 0;
}
@@ -6323,15 +6320,15 @@ static int pin_down_extent(struct btrfs_root *root,
/*
* this function must be called within a transaction
*/
-int btrfs_pin_extent(struct btrfs_root *root,
+int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, int reserved)
{
struct btrfs_block_group_cache *cache;
- cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+ cache = btrfs_lookup_block_group(fs_info, bytenr);
BUG_ON(!cache); /* Logic error */
- pin_down_extent(root, cache, bytenr, num_bytes, reserved);
+ pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
btrfs_put_block_group(cache);
return 0;
@@ -6340,13 +6337,13 @@ int btrfs_pin_extent(struct btrfs_root *root,
/*
* this function must be called within a transaction
*/
-int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
struct btrfs_block_group_cache *cache;
int ret;
- cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+ cache = btrfs_lookup_block_group(fs_info, bytenr);
if (!cache)
return -EINVAL;
@@ -6358,7 +6355,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
*/
cache_block_group(cache, 1);
- pin_down_extent(root, cache, bytenr, num_bytes, 0);
+ pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
/* remove us from the free space cache (if we're there at all) */
ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
@@ -6366,13 +6363,14 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
return ret;
}
-static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
+static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 num_bytes)
{
int ret;
struct btrfs_block_group_cache *block_group;
struct btrfs_caching_control *caching_ctl;
- block_group = btrfs_lookup_block_group(root->fs_info, start);
+ block_group = btrfs_lookup_block_group(fs_info, start);
if (!block_group)
return -EINVAL;
@@ -6387,7 +6385,7 @@ static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_b
mutex_lock(&caching_ctl->mutex);
if (start >= caching_ctl->progress) {
- ret = add_excluded_extent(root, start, num_bytes);
+ ret = add_excluded_extent(fs_info, start, num_bytes);
} else if (start + num_bytes <= caching_ctl->progress) {
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
@@ -6401,7 +6399,7 @@ static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_b
num_bytes = (start + num_bytes) -
caching_ctl->progress;
start = caching_ctl->progress;
- ret = add_excluded_extent(root, start, num_bytes);
+ ret = add_excluded_extent(fs_info, start, num_bytes);
}
out_lock:
mutex_unlock(&caching_ctl->mutex);
@@ -6411,7 +6409,7 @@ out_lock:
return ret;
}
-int btrfs_exclude_logged_extents(struct btrfs_root *log,
+int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
struct btrfs_file_extent_item *item;
@@ -6419,7 +6417,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
int found_type;
int i;
- if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
+ if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
return 0;
for (i = 0; i < btrfs_header_nritems(eb); i++) {
@@ -6434,7 +6432,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
continue;
key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
- __exclude_logged_extent(log, key.objectid, key.offset);
+ __exclude_logged_extent(fs_info, key.objectid, key.offset);
}
return 0;
@@ -6499,16 +6497,9 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
* @num_bytes: The number of bytes in question
* @delalloc: The blocks are allocated for the delalloc write
*
- * This is called by the allocator when it reserves space. Metadata
- * reservations should be called with RESERVE_ALLOC so we do the proper
- * ENOSPC accounting. For data we handle the reservation through clearing the
- * delalloc bits in the io_tree. We have to do this since we could end up
- * allocating less disk space for the amount of data we have reserved in the
- * case of compression.
- *
- * If this is a reservation and the block group has become read only we cannot
- * make the reservation and return -EAGAIN, otherwise this function always
- * succeeds.
+ * This is called by the allocator when it reserves space. If this is a
+ * reservation and the block group has become read only we cannot make the
+ * reservation and return -EAGAIN, otherwise this function always succeeds.
*/
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 ram_bytes, u64 num_bytes, int delalloc)
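Per the trimmed comment, the one failure mode left here is a block group that turned read-only under the caller, signalled by -EAGAIN. A toy model of that contract, with a reservation that succeeds unless its group flipped read-only and a caller that skips to the next candidate; everything here is a made-up stand-in, not the kernel API:

#include <stdio.h>

#define TOY_EAGAIN 11	/* stand-in for -EAGAIN */

struct toy_group {
	int ro;				/* became read-only? */
	unsigned long long reserved;
};

/* Always succeeds unless the group is read-only (cf. comment above). */
static int toy_add_reserved(struct toy_group *g, unsigned long long bytes)
{
	if (g->ro)
		return -TOY_EAGAIN;
	g->reserved += bytes;
	return 0;
}

int main(void)
{
	struct toy_group groups[] = { { .ro = 1 }, { .ro = 0 } };
	unsigned long i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		if (toy_add_reserved(&groups[i], 4096) == 0) {
			printf("reserved in group %lu\n", i);
			break;	/* -EAGAIN from group 0 was skipped */
		}
	}
	return 0;
}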
@@ -6568,9 +6559,8 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_caching_control *next;
struct btrfs_caching_control *caching_ctl;
struct btrfs_block_group_cache *cache;
@@ -6604,11 +6594,11 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
* what it should be based on the mount options.
*/
static struct btrfs_free_cluster *
-fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
- u64 *empty_cluster)
+fetch_cluster_info(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info, u64 *empty_cluster)
{
struct btrfs_free_cluster *ret = NULL;
- bool ssd = btrfs_test_opt(root->fs_info, SSD);
+ bool ssd = btrfs_test_opt(fs_info, SSD);
*empty_cluster = 0;
if (btrfs_mixed_space_info(space_info))
@@ -6617,20 +6607,20 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
if (ssd)
*empty_cluster = SZ_2M;
if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
- ret = &root->fs_info->meta_alloc_cluster;
+ ret = &fs_info->meta_alloc_cluster;
if (!ssd)
*empty_cluster = SZ_64K;
} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
- ret = &root->fs_info->data_alloc_cluster;
+ ret = &fs_info->data_alloc_cluster;
}
return ret;
}
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+ u64 start, u64 end,
const bool return_free_space)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_space_info *space_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
@@ -6650,7 +6640,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
cache = btrfs_lookup_block_group(fs_info, start);
BUG_ON(!cache); /* Logic error */
- cluster = fetch_cluster_info(root,
+ cluster = fetch_cluster_info(fs_info,
cache->space_info,
&empty_cluster);
empty_cluster <<= 1;
@@ -6729,9 +6719,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *block_group, *tmp;
struct list_head *deleted_bgs;
struct extent_io_tree *unpin;
@@ -6753,12 +6742,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
break;
}
- if (btrfs_test_opt(root->fs_info, DISCARD))
- ret = btrfs_discard_extent(root, start,
+ if (btrfs_test_opt(fs_info, DISCARD))
+ ret = btrfs_discard_extent(fs_info, start,
end + 1 - start, NULL);
clear_extent_dirty(unpin, start, end);
- unpin_extent_range(root, start, end, true);
+ unpin_extent_range(fs_info, start, end, true);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
@@ -6774,7 +6763,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
ret = -EROFS;
if (!trans->aborted)
- ret = btrfs_discard_extent(root,
+ ret = btrfs_discard_extent(fs_info,
block_group->key.objectid,
block_group->key.offset,
&trimmed);
@@ -6816,7 +6805,7 @@ static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *info,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
@@ -6824,7 +6813,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_key key;
struct btrfs_path *path;
- struct btrfs_fs_info *info = root->fs_info;
struct btrfs_root *extent_root = info->extent_root;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
@@ -6839,8 +6827,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
int last_ref = 0;
- bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
- SKINNY_METADATA);
+ bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
path = btrfs_alloc_path();
if (!path)
@@ -6937,8 +6924,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
"umm, got %d back from search, was looking for %llu",
ret, bytenr);
if (ret > 0)
- btrfs_print_leaf(extent_root,
- path->nodes[0]);
+ btrfs_print_leaf(info, path->nodes[0]);
}
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
@@ -6947,7 +6933,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
extent_slot = path->slots[0];
}
} else if (WARN_ON(ret == -ENOENT)) {
- btrfs_print_leaf(extent_root, path->nodes[0]);
+ btrfs_print_leaf(info, path->nodes[0]);
btrfs_err(info,
"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
bytenr, parent, root_objectid, owner_objectid,
@@ -6984,7 +6970,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_err(info,
"umm, got %d back from search, was looking for %llu",
ret, bytenr);
- btrfs_print_leaf(extent_root, path->nodes[0]);
+ btrfs_print_leaf(info, path->nodes[0]);
}
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
@@ -7040,7 +7026,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
goto out;
}
}
- add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
+ add_pinned_bytes(info, -num_bytes, owner_objectid,
root_objectid);
} else {
if (found_extent) {
@@ -7065,21 +7051,20 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
if (is_data) {
- ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
+ ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
- ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
- num_bytes);
+ ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- ret = update_block_group(trans, root, bytenr, num_bytes, 0);
+ ret = update_block_group(trans, info, bytenr, num_bytes, 0);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -7099,7 +7084,7 @@ out:
* removes it from the tree.
*/
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr)
+ u64 bytenr)
{
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -7169,15 +7154,17 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
u64 parent, int last_ref)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int pin = 1;
int ret;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
- buf->start, buf->len,
- parent, root->root_key.objectid,
- btrfs_header_level(buf),
- BTRFS_DROP_DELAYED_REF, NULL);
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans,
+ buf->start, buf->len,
+ parent,
+ root->root_key.objectid,
+ btrfs_header_level(buf),
+ BTRFS_DROP_DELAYED_REF, NULL);
BUG_ON(ret); /* -ENOMEM */
}
@@ -7188,15 +7175,16 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *cache;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- ret = check_ref_cleanup(trans, root, buf->start);
+ ret = check_ref_cleanup(trans, buf->start);
if (!ret)
goto out;
}
- cache = btrfs_lookup_block_group(root->fs_info, buf->start);
+ cache = btrfs_lookup_block_group(fs_info, buf->start);
if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
- pin_down_extent(root, cache, buf->start, buf->len, 1);
+ pin_down_extent(fs_info, cache, buf->start,
+ buf->len, 1);
btrfs_put_block_group(cache);
goto out;
}
@@ -7206,13 +7194,12 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
btrfs_add_free_space(cache, buf->start, buf->len);
btrfs_free_reserved_bytes(cache, buf->len, 0);
btrfs_put_block_group(cache);
- trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
pin = 0;
}
out:
if (pin)
- add_pinned_bytes(root->fs_info, buf->len,
- btrfs_header_level(buf),
+ add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
root->root_key.objectid);
/*
@@ -7223,17 +7210,17 @@ out:
}
/* Can return -ENOMEM */
-int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+int btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_is_testing(fs_info))
return 0;
- add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
+ add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
/*
* tree log blocks never actually go into the extent allocation
@@ -7242,7 +7229,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
/* unlocks the pinned mutex */
- btrfs_pin_extent(root, bytenr, num_bytes, 1);
+ btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
ret = 0;
} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
@@ -7433,8 +7420,9 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
u64 hint_byte, struct btrfs_key *ins,
u64 flags, int delalloc)
{
+ struct btrfs_fs_info *fs_info = orig_root->fs_info;
int ret = 0;
- struct btrfs_root *root = orig_root->fs_info->extent_root;
+ struct btrfs_root *root = fs_info->extent_root;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
u64 search_start = 0;
@@ -7450,16 +7438,16 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
bool orig_have_caching_bg = false;
bool full_search = false;
- WARN_ON(num_bytes < root->sectorsize);
+ WARN_ON(num_bytes < fs_info->sectorsize);
ins->type = BTRFS_EXTENT_ITEM_KEY;
ins->objectid = 0;
ins->offset = 0;
- trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
+ trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
- space_info = __find_space_info(root->fs_info, flags);
+ space_info = __find_space_info(fs_info, flags);
if (!space_info) {
- btrfs_err(root->fs_info, "No space info for %llu", flags);
+ btrfs_err(fs_info, "No space info for %llu", flags);
return -ENOSPC;
}
@@ -7486,7 +7474,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
spin_unlock(&space_info->lock);
}
- last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
+ last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
if (last_ptr) {
spin_lock(&last_ptr->lock);
if (last_ptr->block_group)
@@ -7503,11 +7491,10 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
spin_unlock(&last_ptr->lock);
}
- search_start = max(search_start, first_logical_byte(root, 0));
+ search_start = max(search_start, first_logical_byte(fs_info, 0));
search_start = max(search_start, hint_byte);
if (search_start == hint_byte) {
- block_group = btrfs_lookup_block_group(root->fs_info,
- search_start);
+ block_group = btrfs_lookup_block_group(fs_info, search_start);
/*
* we don't want to use the block group if it doesn't match our
* allocation bits, or if its not cached.
@@ -7615,7 +7602,7 @@ have_block_group:
if (offset) {
/* we have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
- trace_btrfs_reserve_extent_cluster(root,
+ trace_btrfs_reserve_extent_cluster(fs_info,
used_block_group,
search_start, num_bytes);
if (used_block_group != block_group) {
@@ -7671,7 +7658,7 @@ refill_cluster:
block_group->full_stripe_len);
/* allocate a cluster in this block group */
- ret = btrfs_find_space_cluster(root, block_group,
+ ret = btrfs_find_space_cluster(fs_info, block_group,
last_ptr, search_start,
num_bytes,
aligned_cluster);
@@ -7688,7 +7675,7 @@ refill_cluster:
if (offset) {
/* we found one, proceed */
spin_unlock(&last_ptr->refill_lock);
- trace_btrfs_reserve_extent_cluster(root,
+ trace_btrfs_reserve_extent_cluster(fs_info,
block_group, search_start,
num_bytes);
goto checks;
@@ -7760,7 +7747,7 @@ unclustered_alloc:
goto loop;
}
checks:
- search_start = ALIGN(offset, root->stripesize);
+ search_start = ALIGN(offset, fs_info->stripesize);
/* move on to the next group */
if (search_start + num_bytes >
@@ -7786,7 +7773,7 @@ checks:
ins->objectid = search_start;
ins->offset = num_bytes;
- trace_btrfs_reserve_extent(orig_root, block_group,
+ trace_btrfs_reserve_extent(fs_info, block_group,
search_start, num_bytes);
btrfs_release_block_group(block_group, delalloc);
break;
@@ -7847,7 +7834,7 @@ loop:
goto out;
}
- ret = do_chunk_alloc(trans, root, flags,
+ ret = do_chunk_alloc(trans, fs_info, flags,
CHUNK_ALLOC_FORCE);
/*
@@ -7867,7 +7854,7 @@ loop:
else
ret = 0;
if (!exist)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret)
goto out;
}
@@ -7959,7 +7946,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
flags = btrfs_get_alloc_profile(root, is_data);
again:
- WARN_ON(num_bytes < root->sectorsize);
+ WARN_ON(num_bytes < fs_info->sectorsize);
ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
hint_byte, ins, flags, delalloc);
if (!ret && !is_data) {
@@ -7967,7 +7954,8 @@ again:
} else if (ret == -ENOSPC) {
if (!final_tried && ins->offset) {
num_bytes = min(num_bytes >> 1, ins->offset);
- num_bytes = round_down(num_bytes, root->sectorsize);
+ num_bytes = round_down(num_bytes,
+ fs_info->sectorsize);
num_bytes = max(num_bytes, min_alloc_size);
ram_bytes = num_bytes;
if (num_bytes == min_alloc_size)
@@ -7977,7 +7965,7 @@ again:
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(fs_info, flags);
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"allocation failed flags %llu, wanted %llu",
flags, num_bytes);
if (sinfo)
@@ -7988,54 +7976,53 @@ again:
return ret;
}
-static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len,
int pin, int delalloc)
{
struct btrfs_block_group_cache *cache;
int ret = 0;
- cache = btrfs_lookup_block_group(root->fs_info, start);
+ cache = btrfs_lookup_block_group(fs_info, start);
if (!cache) {
- btrfs_err(root->fs_info, "Unable to find block group for %llu",
- start);
+ btrfs_err(fs_info, "Unable to find block group for %llu",
+ start);
return -ENOSPC;
}
if (pin)
- pin_down_extent(root, cache, start, len, 1);
+ pin_down_extent(fs_info, cache, start, len, 1);
else {
- if (btrfs_test_opt(root->fs_info, DISCARD))
- ret = btrfs_discard_extent(root, start, len, NULL);
+ if (btrfs_test_opt(fs_info, DISCARD))
+ ret = btrfs_discard_extent(fs_info, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_free_reserved_bytes(cache, len, delalloc);
- trace_btrfs_reserved_extent_free(root, start, len);
+ trace_btrfs_reserved_extent_free(fs_info, start, len);
}
btrfs_put_block_group(cache);
return ret;
}
-int btrfs_free_reserved_extent(struct btrfs_root *root,
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc)
{
- return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
+ return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
}
-int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
- return __btrfs_free_reserved_extent(root, start, len, 1, 0);
+ return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_extent_item *extent_item;
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
@@ -8094,24 +8081,23 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+ ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
btrfs_err(fs_info, "update block group failed for %llu %llu",
ins->objectid, ins->offset);
BUG();
}
- trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
+ trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
return ret;
}
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins)
{
int ret;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_extent_item *extent_item;
struct btrfs_tree_block_info *block_info;
struct btrfs_extent_inline_ref *iref;
@@ -8119,16 +8105,15 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
u32 size = sizeof(*extent_item) + sizeof(*iref);
u64 num_bytes = ins->offset;
- bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
- SKINNY_METADATA);
+ bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
if (!skinny_metadata)
size += sizeof(*block_info);
path = btrfs_alloc_path();
if (!path) {
- btrfs_free_and_pin_reserved_extent(root, ins->objectid,
- root->nodesize);
+ btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
+ fs_info->nodesize);
return -ENOMEM;
}
@@ -8137,8 +8122,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
ins, size);
if (ret) {
btrfs_free_path(path);
- btrfs_free_and_pin_reserved_extent(root, ins->objectid,
- root->nodesize);
+ btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
+ fs_info->nodesize);
return ret;
}
@@ -8152,7 +8137,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
if (skinny_metadata) {
iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
- num_bytes = root->nodesize;
+ num_bytes = fs_info->nodesize;
} else {
block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
btrfs_set_tree_block_key(leaf, block_info, key);
@@ -8179,29 +8164,30 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = update_block_group(trans, root, ins->objectid, root->nodesize,
- 1);
+ ret = update_block_group(trans, fs_info, ins->objectid,
+ fs_info->nodesize, 1);
if (ret) { /* -ENOENT, logic error */
btrfs_err(fs_info, "update block group failed for %llu %llu",
ins->objectid, ins->offset);
BUG();
}
- trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
+ trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
+ fs_info->nodesize);
return ret;
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
u64 root_objectid, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
ins->offset, 0,
root_objectid, owner, offset,
ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
@@ -8215,7 +8201,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
* space cache bits as well
*/
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins)
{
@@ -8227,13 +8213,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
* Mixed block groups will exclude before processing the log so we only
* need to do the exclude dance if this fs isn't mixed.
*/
- if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
- ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
+ if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
+ ret = __exclude_logged_extent(fs_info, ins->objectid,
+ ins->offset);
if (ret)
return ret;
}
- block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
+ block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
if (!block_group)
return -EINVAL;
@@ -8245,7 +8232,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
+ ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
0, owner, offset, ins, 1);
btrfs_put_block_group(block_group);
return ret;
@@ -8255,16 +8242,17 @@ static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 bytenr, int level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return buf;
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
- clean_tree_block(trans, root->fs_info, buf);
+ clean_tree_block(trans, fs_info, buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
btrfs_set_lock_blocking(buf);
@@ -8296,8 +8284,9 @@ static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u32 blocksize)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *block_rsv;
- struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
int ret;
bool global_updated = false;
@@ -8315,11 +8304,11 @@ again:
if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
global_updated = true;
- update_global_block_rsv(root->fs_info);
+ update_global_block_rsv(fs_info);
goto again;
}
- if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
/*DEFAULT_RATELIMIT_BURST*/ 1);
@@ -8363,18 +8352,18 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_disk_key *key, int level,
u64 hint, u64 empty_size)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key ins;
struct btrfs_block_rsv *block_rsv;
struct extent_buffer *buf;
struct btrfs_delayed_extent_op *extent_op;
u64 flags = 0;
int ret;
- u32 blocksize = root->nodesize;
- bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
- SKINNY_METADATA);
+ u32 blocksize = fs_info->nodesize;
+ bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (btrfs_is_testing(root->fs_info)) {
+ if (btrfs_is_testing(fs_info)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
level);
if (!IS_ERR(buf))
@@ -8421,7 +8410,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op->is_data = false;
extent_op->level = level;
- ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans,
ins.objectid, ins.offset,
parent, root_objectid, level,
BTRFS_ADD_DELAYED_EXTENT,
@@ -8436,9 +8425,9 @@ out_free_delayed:
out_free_buf:
free_extent_buffer(buf);
out_free_reserved:
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
- unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+ unuse_block_rsv(fs_info, block_rsv, blocksize);
return ERR_PTR(ret);
}
@@ -8464,6 +8453,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
struct walk_control *wc,
struct btrfs_path *path)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 generation;
u64 refs;
@@ -8481,7 +8471,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
} else {
wc->reada_count = wc->reada_count * 3 / 2;
wc->reada_count = min_t(int, wc->reada_count,
- BTRFS_NODEPTRS_PER_BLOCK(root));
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info));
}
eb = path->nodes[wc->level];
@@ -8503,7 +8493,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
continue;
/* We don't lock the tree block, it's OK to be racy here */
- ret = btrfs_lookup_extent_info(trans, root, bytenr,
+ ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
wc->level - 1, 1, &refs,
&flags);
/* We don't care about errors in readahead. */
@@ -8532,226 +8522,12 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
continue;
}
reada:
- readahead_tree_block(root, bytenr);
+ readahead_tree_block(fs_info, bytenr);
nread++;
}
wc->reada_slot = slot;
}
-static int account_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *eb)
-{
- int nr = btrfs_header_nritems(eb);
- int i, extent_type, ret;
- struct btrfs_key key;
- struct btrfs_file_extent_item *fi;
- u64 bytenr, num_bytes;
-
- /* We can be called directly from walk_up_proc() */
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
- return 0;
-
- for (i = 0; i < nr; i++) {
- btrfs_item_key_to_cpu(eb, &key, i);
-
- if (key.type != BTRFS_EXTENT_DATA_KEY)
- continue;
-
- fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
- /* filter out non qgroup-accountable extents */
- extent_type = btrfs_file_extent_type(eb, fi);
-
- if (extent_type == BTRFS_FILE_EXTENT_INLINE)
- continue;
-
- bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
- if (!bytenr)
- continue;
-
- num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
-
- ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
- bytenr, num_bytes, GFP_NOFS);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/*
- * Walk up the tree from the bottom, freeing leaves and any interior
- * nodes which have had all slots visited. If a node (leaf or
- * interior) is freed, the node above it will have its slot
- * incremented. The root node will never be freed.
- *
- * At the end of this function, we should have a path which has all
- * slots incremented to the next position for a search. If we need to
- * read a new node it will be NULL and the node above it will have the
- * correct slot selected for a later read.
- *
- * If we increment the root node's slot counter past the number of
- * elements, 1 is returned to signal completion of the search.
- */
-static int adjust_slots_upwards(struct btrfs_root *root,
- struct btrfs_path *path, int root_level)
-{
- int level = 0;
- int nr, slot;
- struct extent_buffer *eb;
-
- if (root_level == 0)
- return 1;
-
- while (level <= root_level) {
- eb = path->nodes[level];
- nr = btrfs_header_nritems(eb);
- path->slots[level]++;
- slot = path->slots[level];
- if (slot >= nr || level == 0) {
- /*
- * Don't free the root - we will detect this
- * condition after our loop and return a
-			 * positive value for the caller to stop walking the tree.
- */
- if (level != root_level) {
- btrfs_tree_unlock_rw(eb, path->locks[level]);
- path->locks[level] = 0;
-
- free_extent_buffer(eb);
- path->nodes[level] = NULL;
- path->slots[level] = 0;
- }
- } else {
- /*
- * We have a valid slot to walk back down
-			 * from. Stop here so the caller can process these
- * new nodes.
- */
- break;
- }
-
- level++;
- }
-
- eb = path->nodes[root_level];
- if (path->slots[root_level] >= btrfs_header_nritems(eb))
- return 1;
-
- return 0;
-}
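The removed adjust_slots_upwards() is easier to see on a toy tree where each level just tracks a slot and an item count. A simplified userspace rendering of the same walk-up rule (reset exhausted levels, stop at the first level with a valid slot, report 1 when the root runs out); purely illustrative, not the kernel code:

/* Toy version: slots[] and nritems[] indexed by level, root at root_level. */
static int adjust_slots(int *slots, const int *nritems, int root_level)
{
	int level = 0;

	if (root_level == 0)
		return 1;	/* a lone leaf: one visit finishes the search */

	while (level <= root_level) {
		slots[level]++;
		if (slots[level] >= nritems[level] || level == 0) {
			if (level != root_level)
				slots[level] = 0; /* stand-in for freeing the node */
		} else {
			break;	/* valid slot to walk back down from */
		}
		level++;
	}

	return slots[root_level] >= nritems[root_level];
}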
-
-/*
- * root_eb is the subtree root and is locked before this function is called.
- */
-static int account_shared_subtree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *root_eb,
- u64 root_gen,
- int root_level)
-{
- int ret = 0;
- int level;
- struct extent_buffer *eb = root_eb;
- struct btrfs_path *path = NULL;
-
- BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
- BUG_ON(root_eb == NULL);
-
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
- return 0;
-
- if (!extent_buffer_uptodate(root_eb)) {
- ret = btrfs_read_buffer(root_eb, root_gen);
- if (ret)
- goto out;
- }
-
- if (root_level == 0) {
- ret = account_leaf_items(trans, root, root_eb);
- goto out;
- }
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- /*
- * Walk down the tree. Missing extent blocks are filled in as
- * we go. Metadata is accounted every time we read a new
- * extent block.
- *
- * When we reach a leaf, we account for file extent items in it,
- * walk back up the tree (adjusting slot pointers as we go)
- * and restart the search process.
- */
- extent_buffer_get(root_eb); /* For path */
- path->nodes[root_level] = root_eb;
- path->slots[root_level] = 0;
- path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
-walk_down:
- level = root_level;
- while (level >= 0) {
- if (path->nodes[level] == NULL) {
- int parent_slot;
- u64 child_gen;
- u64 child_bytenr;
-
- /* We need to get child blockptr/gen from
- * parent before we can read it. */
- eb = path->nodes[level + 1];
- parent_slot = path->slots[level + 1];
- child_bytenr = btrfs_node_blockptr(eb, parent_slot);
- child_gen = btrfs_node_ptr_generation(eb, parent_slot);
-
- eb = read_tree_block(root, child_bytenr, child_gen);
- if (IS_ERR(eb)) {
- ret = PTR_ERR(eb);
- goto out;
- } else if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- ret = -EIO;
- goto out;
- }
-
- path->nodes[level] = eb;
- path->slots[level] = 0;
-
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
- path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
-
- ret = btrfs_qgroup_insert_dirty_extent(trans,
- root->fs_info, child_bytenr,
- root->nodesize, GFP_NOFS);
- if (ret)
- goto out;
- }
-
- if (level == 0) {
- ret = account_leaf_items(trans, root, path->nodes[level]);
- if (ret)
- goto out;
-
- /* Nonzero return here means we completed our search */
- ret = adjust_slots_upwards(root, path, root_level);
- if (ret)
- break;
-
- /* Restart search with new slots */
- goto walk_down;
- }
-
- level--;
- }
-
- ret = 0;
-out:
- btrfs_free_path(path);
-
- return ret;
-}
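And a toy driver for adjust_slots() from the sketch above, mirroring the walk-down/account/restart loop this removed comment describes: visit every leaf of a two-level tree, adjusting slots upward after each one until the root is exhausted (again purely illustrative):

#include <stdio.h>

int main(void)
{
	int slots[2] = { 0, 0 };		/* level 0 = leaf, level 1 = root */
	const int nritems[2] = { 3, 2 };	/* 3 items per leaf, 2 leaves */

	for (;;) {
		printf("account leaf %d\n", slots[1]); /* cf. account_leaf_items() */
		if (adjust_slots(slots, nritems, 1))
			break;	/* root slot ran past its items: done */
	}
	return 0;
}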
-
/*
* helper to process tree block while walking down the tree.
*
@@ -8765,6 +8541,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct walk_control *wc, int lookup_info)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int level = wc->level;
struct extent_buffer *eb = path->nodes[level];
u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -8782,7 +8559,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
BUG_ON(!path->locks[level]);
- ret = btrfs_lookup_extent_info(trans, root,
+ ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
&wc->flags[level]);
@@ -8810,7 +8587,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+ ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
eb->len, flag,
btrfs_header_level(eb), 0);
BUG_ON(ret); /* -ENOMEM */
@@ -8846,6 +8623,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct walk_control *wc, int *lookup_info)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 generation;
u64 parent;
@@ -8871,11 +8649,11 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
}
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
- next = btrfs_find_tree_block(root->fs_info, bytenr);
+ next = find_extent_buffer(fs_info, bytenr);
if (!next) {
- next = btrfs_find_create_tree_block(root, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
@@ -8886,14 +8664,14 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
+ ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
&wc->refs[level - 1],
&wc->flags[level - 1]);
if (ret < 0)
goto out_unlock;
if (unlikely(wc->refs[level - 1] == 0)) {
- btrfs_err(root->fs_info, "Missing references.");
+ btrfs_err(fs_info, "Missing references.");
ret = -EIO;
goto out_unlock;
}
@@ -8935,7 +8713,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (!next) {
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
- next = read_tree_block(root, bytenr, generation);
+ next = read_tree_block(fs_info, bytenr, generation);
if (IS_ERR(next)) {
return PTR_ERR(next);
} else if (!extent_buffer_uptodate(next)) {
@@ -8980,16 +8758,17 @@ skip:
}
if (need_account) {
- ret = account_shared_subtree(trans, root, next,
- generation, level - 1);
+ ret = btrfs_qgroup_trace_subtree(trans, root, next,
+ generation, level - 1);
if (ret) {
- btrfs_err_rl(root->fs_info,
+ btrfs_err_rl(fs_info,
"Error %d accounting shared subtree. Quota is out of sync, rescan required.",
ret);
}
}
- ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
- root->root_key.objectid, level - 1, 0);
+ ret = btrfs_free_extent(trans, fs_info, bytenr, blocksize,
+ parent, root->root_key.objectid,
+ level - 1, 0);
if (ret)
goto out_unlock;
}
@@ -9021,6 +8800,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int level = wc->level;
struct extent_buffer *eb = path->nodes[level];
@@ -9050,7 +8830,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
btrfs_set_lock_blocking(eb);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
- ret = btrfs_lookup_extent_info(trans, root,
+ ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
&wc->flags[level]);
@@ -9078,9 +8858,9 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
else
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = account_leaf_items(trans, root, eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
if (ret) {
- btrfs_err_rl(root->fs_info,
+ btrfs_err_rl(fs_info,
"error %d accounting leaf items. Quota is out of sync, rescan required.",
ret);
}
@@ -9092,7 +8872,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
btrfs_set_lock_blocking(eb);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
}
- clean_tree_block(trans, root->fs_info, eb);
+ clean_tree_block(trans, fs_info, eb);
}
if (eb == root->node) {
@@ -9270,7 +9050,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
btrfs_set_lock_blocking(path->nodes[level]);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
- ret = btrfs_lookup_extent_info(trans, root,
+ ret = btrfs_lookup_extent_info(trans, fs_info,
path->nodes[level]->start,
level, 1, &wc->refs[level],
&wc->flags[level]);
@@ -9296,7 +9076,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
wc->update_ref = update_ref;
wc->keep_locks = 0;
wc->for_reloc = for_reloc;
- wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
+ wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
@@ -9326,8 +9106,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
}
BUG_ON(wc->level == 0);
- if (btrfs_should_end_transaction(trans, tree_root) ||
- (!for_reloc && btrfs_need_cleaner_sleep(root))) {
+ if (btrfs_should_end_transaction(trans) ||
+ (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
root_item);
@@ -9337,8 +9117,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
goto out_end_trans;
}
- btrfs_end_transaction_throttle(trans, tree_root);
- if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
+ btrfs_end_transaction_throttle(trans);
+ if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
btrfs_debug(fs_info,
"drop snapshot early exit");
err = -EAGAIN;
@@ -9391,7 +9171,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
}
root_dropped = true;
out_end_trans:
- btrfs_end_transaction_throttle(trans, tree_root);
+ btrfs_end_transaction_throttle(trans);
out_free:
kfree(wc);
btrfs_free_path(path);
@@ -9421,6 +9201,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *node,
struct extent_buffer *parent)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct walk_control *wc;
int level;
@@ -9460,7 +9241,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
wc->update_ref = 0;
wc->keep_locks = 1;
wc->for_reloc = 1;
- wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
+ wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
wret = walk_down_tree(trans, root, path, wc);
@@ -9481,7 +9262,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
return ret;
}
-static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
+static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 num_devices;
u64 stripped;
@@ -9490,11 +9271,11 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
* if restripe for this chunk_type is on pick target profile and
* return, otherwise do the usual balance
*/
- stripped = get_restripe_target(root->fs_info, flags);
+ stripped = get_restripe_target(fs_info, flags);
if (stripped)
return extended_to_chunk(stripped);
- num_devices = root->fs_info->fs_devices->rw_devices;
+ num_devices = fs_info->fs_devices->rw_devices;
stripped = BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
@@ -9579,6 +9360,7 @@ int btrfs_inc_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
u64 alloc_flags;
int ret;
@@ -9593,14 +9375,14 @@ again:
* block groups cache has started writing. If it already started,
* back off and let this transaction commit
*/
- mutex_lock(&root->fs_info->ro_block_group_mutex);
+ mutex_lock(&fs_info->ro_block_group_mutex);
if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
u64 transid = trans->transid;
- mutex_unlock(&root->fs_info->ro_block_group_mutex);
- btrfs_end_transaction(trans, root);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+ btrfs_end_transaction(trans);
- ret = btrfs_wait_for_commit(root, transid);
+ ret = btrfs_wait_for_commit(fs_info, transid);
if (ret)
return ret;
goto again;
@@ -9610,9 +9392,9 @@ again:
* if we are changing raid levels, try to allocate a corresponding
* block group with the new raid level.
*/
- alloc_flags = update_block_group_flags(root, cache->flags);
+ alloc_flags = update_block_group_flags(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
- ret = do_chunk_alloc(trans, root, alloc_flags,
+ ret = do_chunk_alloc(trans, fs_info, alloc_flags,
CHUNK_ALLOC_FORCE);
/*
* ENOSPC is allowed here, we may have enough space
@@ -9628,31 +9410,31 @@ again:
ret = inc_block_group_ro(cache, 0);
if (!ret)
goto out;
- alloc_flags = get_alloc_profile(root, cache->space_info->flags);
- ret = do_chunk_alloc(trans, root, alloc_flags,
+ alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
+ ret = do_chunk_alloc(trans, fs_info, alloc_flags,
CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
ret = inc_block_group_ro(cache, 0);
out:
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
- alloc_flags = update_block_group_flags(root, cache->flags);
- lock_chunks(root->fs_info->chunk_root);
- check_system_chunk(trans, root, alloc_flags);
- unlock_chunks(root->fs_info->chunk_root);
+ alloc_flags = update_block_group_flags(fs_info, cache->flags);
+ mutex_lock(&fs_info->chunk_mutex);
+ check_system_chunk(trans, fs_info, alloc_flags);
+ mutex_unlock(&fs_info->chunk_mutex);
}
- mutex_unlock(&root->fs_info->ro_block_group_mutex);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 type)
+ struct btrfs_fs_info *fs_info, u64 type)
{
- u64 alloc_flags = get_alloc_profile(root, type);
- return do_chunk_alloc(trans, root, alloc_flags,
- CHUNK_ALLOC_FORCE);
+ u64 alloc_flags = get_alloc_profile(fs_info, type);
+
+ return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
}
/*
@@ -9696,8 +9478,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
return free_bytes;
}
-void btrfs_dec_block_group_ro(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache)
+void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
@@ -9723,11 +9504,12 @@ void btrfs_dec_block_group_ro(struct btrfs_root *root,
* @return - -1 if it's not a good idea to relocate this block group, 0 if it's
* ok to go ahead and try.
*/
-int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
+int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
{
+ struct btrfs_root *root = fs_info->extent_root;
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
struct btrfs_trans_handle *trans;
u64 min_free;
@@ -9739,14 +9521,14 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
int full = 0;
int ret = 0;
- debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
+ debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
- block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+ block_group = btrfs_lookup_block_group(fs_info, bytenr);
/* odd, couldn't find the block group, leave it alone */
if (!block_group) {
if (debug)
- btrfs_warn(root->fs_info,
+ btrfs_warn(fs_info,
"can't find block group for bytenr %llu",
bytenr);
return -1;
@@ -9796,7 +9578,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
* 3: raid0
* 4: single
*/
- target = get_restripe_target(root->fs_info, block_group->flags);
+ target = get_restripe_target(fs_info, block_group->flags);
if (target) {
index = __get_raid_index(extended_to_chunk(target));
} else {
@@ -9806,9 +9588,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
*/
if (full) {
if (debug)
- btrfs_warn(root->fs_info,
- "no space to alloc new chunk for block group %llu",
- block_group->key.objectid);
+ btrfs_warn(fs_info,
+ "no space to alloc new chunk for block group %llu",
+ block_group->key.objectid);
goto out;
}
@@ -9836,7 +9618,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
goto out;
}
- mutex_lock(&root->fs_info->chunk_mutex);
+ mutex_lock(&fs_info->chunk_mutex);
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
u64 dev_offset;
@@ -9858,19 +9640,21 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
}
}
if (debug && ret == -1)
- btrfs_warn(root->fs_info,
- "no space to allocate a new chunk for block group %llu",
- block_group->key.objectid);
- mutex_unlock(&root->fs_info->chunk_mutex);
- btrfs_end_transaction(trans, root);
+ btrfs_warn(fs_info,
+ "no space to allocate a new chunk for block group %llu",
+ block_group->key.objectid);
+ mutex_unlock(&fs_info->chunk_mutex);
+ btrfs_end_transaction(trans);
out:
btrfs_put_block_group(block_group);
return ret;
}
-static int find_first_block_group(struct btrfs_root *root,
- struct btrfs_path *path, struct btrfs_key *key)
+static int find_first_block_group(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ struct btrfs_key *key)
{
+ struct btrfs_root *root = fs_info->extent_root;
int ret = 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
@@ -9904,7 +9688,7 @@ static int find_first_block_group(struct btrfs_root *root,
found_key.offset);
read_unlock(&em_tree->lock);
if (!em) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"logical %llu len %llu found bg but no related chunk",
found_key.objectid, found_key.offset);
ret = -ENOENT;
@@ -9934,8 +9718,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
if (block_group->iref)
break;
spin_unlock(&block_group->lock);
- block_group = next_block_group(info->tree_root,
- block_group);
+ block_group = next_block_group(info, block_group);
}
if (!block_group) {
if (last == 0)
@@ -10003,7 +9786,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
*/
if (block_group->cached == BTRFS_CACHE_NO ||
block_group->cached == BTRFS_CACHE_ERROR)
- free_excluded_extents(info->extent_root, block_group);
+ free_excluded_extents(info, block_group);
btrfs_remove_free_space_cache(block_group);
ASSERT(list_empty(&block_group->dirty_list));
@@ -10094,7 +9877,8 @@ out_err:
}
static struct btrfs_block_group_cache *
-btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
+btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
+ u64 start, u64 size)
{
struct btrfs_block_group_cache *cache;
@@ -10113,11 +9897,11 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
cache->key.offset = size;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- cache->sectorsize = root->sectorsize;
- cache->fs_info = root->fs_info;
- cache->full_stripe_len = btrfs_full_stripe_len(root,
- &root->fs_info->mapping_tree,
- start);
+ cache->sectorsize = fs_info->sectorsize;
+ cache->fs_info = fs_info;
+ cache->full_stripe_len = btrfs_full_stripe_len(fs_info,
+ &fs_info->mapping_tree,
+ start);
set_free_space_tree_thresholds(cache);
atomic_set(&cache->count, 1);
@@ -10136,12 +9920,11 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
return cache;
}
-int btrfs_read_block_groups(struct btrfs_root *root)
+int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_path *path;
int ret;
struct btrfs_block_group_cache *cache;
- struct btrfs_fs_info *info = root->fs_info;
struct btrfs_space_info *space_info;
struct btrfs_key key;
struct btrfs_key found_key;
@@ -10154,7 +9937,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
feature = btrfs_super_incompat_flags(info->super_copy);
mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
- root = info->extent_root;
key.objectid = 0;
key.offset = 0;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
@@ -10163,15 +9945,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
return -ENOMEM;
path->reada = READA_FORWARD;
- cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
- if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
- btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
+ cache_gen = btrfs_super_cache_generation(info->super_copy);
+ if (btrfs_test_opt(info, SPACE_CACHE) &&
+ btrfs_super_generation(info->super_copy) != cache_gen)
need_clear = 1;
- if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
+ if (btrfs_test_opt(info, CLEAR_CACHE))
need_clear = 1;
while (1) {
- ret = find_first_block_group(root, path, &key);
+ ret = find_first_block_group(info, path, &key);
if (ret > 0)
break;
if (ret != 0)
@@ -10180,7 +9962,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- cache = btrfs_create_block_group_cache(root, found_key.objectid,
+ cache = btrfs_create_block_group_cache(info, found_key.objectid,
found_key.offset);
if (!cache) {
ret = -ENOMEM;
@@ -10198,7 +9980,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
* b) Setting 'dirty flag' makes sure that we flush
* the new space cache info onto disk.
*/
- if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
+ if (btrfs_test_opt(info, SPACE_CACHE))
cache->disk_cache_state = BTRFS_DC_CLEAR;
}
@@ -10224,13 +10006,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
* info has super bytes accounted for, otherwise we'll think
* we have more space than we actually do.
*/
- ret = exclude_super_stripes(root, cache);
+ ret = exclude_super_stripes(info, cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(root, cache);
+ free_excluded_extents(info, cache);
btrfs_put_block_group(cache);
goto error;
}
@@ -10245,25 +10027,25 @@ int btrfs_read_block_groups(struct btrfs_root *root)
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- free_excluded_extents(root, cache);
+ free_excluded_extents(info, cache);
} else if (btrfs_block_group_used(&cache->item) == 0) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, root->fs_info,
+ add_new_free_space(cache, info,
found_key.objectid,
found_key.objectid +
found_key.offset);
- free_excluded_extents(root, cache);
+ free_excluded_extents(info, cache);
}
- ret = btrfs_add_block_group_cache(root->fs_info, cache);
+ ret = btrfs_add_block_group_cache(info, cache);
if (ret) {
btrfs_remove_free_space_cache(cache);
btrfs_put_block_group(cache);
goto error;
}
- trace_btrfs_add_block_group(root->fs_info, cache, 0);
+ trace_btrfs_add_block_group(info, cache, 0);
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
cache->bytes_super, &space_info);
@@ -10282,8 +10064,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
__link_block_group(space_info, cache);
- set_avail_alloc_bits(root->fs_info, cache->flags);
- if (btrfs_chunk_readonly(root, cache->key.objectid)) {
+ set_avail_alloc_bits(info, cache->flags);
+ if (btrfs_chunk_readonly(info, cache->key.objectid)) {
inc_block_group_ro(cache, 1);
} else if (btrfs_block_group_used(&cache->item) == 0) {
spin_lock(&info->unused_bgs_lock);
@@ -10297,8 +10079,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
}
}
- list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
- if (!(get_alloc_profile(root, space_info->flags) &
+ list_for_each_entry_rcu(space_info, &info->space_info, list) {
+ if (!(get_alloc_profile(info, space_info->flags) &
(BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID5 |
@@ -10327,10 +10109,10 @@ error:
}
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *block_group, *tmp;
- struct btrfs_root *extent_root = root->fs_info->extent_root;
+ struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_block_group_item item;
struct btrfs_key key;
int ret = 0;
@@ -10350,11 +10132,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
sizeof(item));
if (ret)
btrfs_abort_transaction(trans, ret);
- ret = btrfs_finish_chunk_alloc(trans, extent_root,
- key.objectid, key.offset);
+ ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
+ key.offset);
if (ret)
btrfs_abort_transaction(trans, ret);
- add_block_group_free_space(trans, root->fs_info, block_group);
+ add_block_group_free_space(trans, fs_info, block_group);
/* already aborted the transaction if it failed. */
next:
list_del_init(&block_group->bg_list);
@@ -10363,18 +10145,16 @@ next:
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytes_used,
+ struct btrfs_fs_info *fs_info, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size)
{
- int ret;
- struct btrfs_root *extent_root;
struct btrfs_block_group_cache *cache;
- extent_root = root->fs_info->extent_root;
+ int ret;
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
- cache = btrfs_create_block_group_cache(root, chunk_offset, size);
+ cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
if (!cache)
return -ENOMEM;
@@ -10386,28 +10166,27 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
cache->needs_free_space = 1;
- ret = exclude_super_stripes(root, cache);
+ ret = exclude_super_stripes(fs_info, cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(root, cache);
+ free_excluded_extents(fs_info, cache);
btrfs_put_block_group(cache);
return ret;
}
- add_new_free_space(cache, root->fs_info, chunk_offset,
- chunk_offset + size);
+ add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
- free_excluded_extents(root, cache);
+ free_excluded_extents(fs_info, cache);
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(root, cache)) {
+ if (btrfs_should_fragment_free_space(cache)) {
u64 new_bytes_used = size - bytes_used;
bytes_used += new_bytes_used >> 1;
- fragment_free_space(root, cache);
+ fragment_free_space(cache);
}
#endif
/*
@@ -10415,7 +10194,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
* assigned to our block group, but don't update its counters just yet.
* We want our bg to be added to the rbtree with its ->space_info set.
*/
- ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
+ ret = update_space_info(fs_info, cache->flags, 0, 0, 0,
&cache->space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
@@ -10423,7 +10202,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
return ret;
}
- ret = btrfs_add_block_group_cache(root->fs_info, cache);
+ ret = btrfs_add_block_group_cache(fs_info, cache);
if (ret) {
btrfs_remove_free_space_cache(cache);
btrfs_put_block_group(cache);
@@ -10434,26 +10213,26 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
* Now that our block group has its ->space_info set and is inserted in
* the rbtree, update the space info's counters.
*/
- trace_btrfs_add_block_group(root->fs_info, cache, 1);
- ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
+ trace_btrfs_add_block_group(fs_info, cache, 1);
+ ret = update_space_info(fs_info, cache->flags, size, bytes_used,
cache->bytes_super, &cache->space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
- spin_lock(&root->fs_info->block_group_cache_lock);
+ spin_lock(&fs_info->block_group_cache_lock);
rb_erase(&cache->cache_node,
- &root->fs_info->block_group_cache_tree);
+ &fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&cache->cache_node);
- spin_unlock(&root->fs_info->block_group_cache_lock);
+ spin_unlock(&fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
return ret;
}
- update_global_block_rsv(root->fs_info);
+ update_global_block_rsv(fs_info);
__link_block_group(cache->space_info, cache);
list_add_tail(&cache->bg_list, &trans->new_bgs);
- set_avail_alloc_bits(extent_root->fs_info, type);
+ set_avail_alloc_bits(fs_info, type);
return 0;
}
@@ -10473,13 +10252,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 group_start,
+ struct btrfs_fs_info *fs_info, u64 group_start,
struct extent_map *em)
{
+ struct btrfs_root *root = fs_info->extent_root;
struct btrfs_path *path;
struct btrfs_block_group_cache *block_group;
struct btrfs_free_cluster *cluster;
- struct btrfs_root *tree_root = root->fs_info->tree_root;
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_key key;
struct inode *inode;
struct kobject *kobj = NULL;
@@ -10489,9 +10269,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_caching_control *caching_ctl = NULL;
bool remove_em;
- root = root->fs_info->extent_root;
-
- block_group = btrfs_lookup_block_group(root->fs_info, group_start);
+ block_group = btrfs_lookup_block_group(fs_info, group_start);
BUG_ON(!block_group);
BUG_ON(!block_group->ro);
@@ -10499,7 +10277,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* Free the reserved super bytes from this block group before
* removing it.
*/
- free_excluded_extents(root, block_group);
+ free_excluded_extents(fs_info, block_group);
memcpy(&key, &block_group->key, sizeof(key));
index = get_block_group_index(block_group);
@@ -10511,7 +10289,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
factor = 1;
/* make sure this block group isn't part of an allocation cluster */
- cluster = &root->fs_info->data_alloc_cluster;
+ cluster = &fs_info->data_alloc_cluster;
spin_lock(&cluster->refill_lock);
btrfs_return_cluster_to_free_space(block_group, cluster);
spin_unlock(&cluster->refill_lock);
@@ -10520,7 +10298,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* make sure this block group isn't part of a metadata
* allocation cluster
*/
- cluster = &root->fs_info->meta_alloc_cluster;
+ cluster = &fs_info->meta_alloc_cluster;
spin_lock(&cluster->refill_lock);
btrfs_return_cluster_to_free_space(block_group, cluster);
spin_unlock(&cluster->refill_lock);
@@ -10549,9 +10327,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
spin_unlock(&trans->transaction->dirty_bgs_lock);
- btrfs_wait_cache_io(root, trans, block_group,
- &block_group->io_ctl, path,
- block_group->key.objectid);
+ btrfs_wait_cache_io(trans, block_group, path);
btrfs_put_block_group(block_group);
spin_lock(&trans->transaction->dirty_bgs_lock);
}
@@ -10600,14 +10376,14 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
}
- spin_lock(&root->fs_info->block_group_cache_lock);
+ spin_lock(&fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
- &root->fs_info->block_group_cache_tree);
+ &fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);
- if (root->fs_info->first_logical_byte == block_group->key.objectid)
- root->fs_info->first_logical_byte = (u64)-1;
- spin_unlock(&root->fs_info->block_group_cache_lock);
+ if (fs_info->first_logical_byte == block_group->key.objectid)
+ fs_info->first_logical_byte = (u64)-1;
+ spin_unlock(&fs_info->block_group_cache_lock);
down_write(&block_group->space_info->groups_sem);
/*
@@ -10618,7 +10394,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (list_empty(&block_group->space_info->block_groups[index])) {
kobj = block_group->space_info->block_group_kobjs[index];
block_group->space_info->block_group_kobjs[index] = NULL;
- clear_avail_alloc_bits(root->fs_info, block_group->flags);
+ clear_avail_alloc_bits(fs_info, block_group->flags);
}
up_write(&block_group->space_info->groups_sem);
if (kobj) {
@@ -10631,12 +10407,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (block_group->cached == BTRFS_CACHE_STARTED)
wait_block_group_cache_done(block_group);
if (block_group->has_caching_ctl) {
- down_write(&root->fs_info->commit_root_sem);
+ down_write(&fs_info->commit_root_sem);
if (!caching_ctl) {
struct btrfs_caching_control *ctl;
list_for_each_entry(ctl,
- &root->fs_info->caching_block_groups, list)
+ &fs_info->caching_block_groups, list)
if (ctl->block_group == block_group) {
caching_ctl = ctl;
atomic_inc(&caching_ctl->count);
@@ -10645,7 +10421,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
}
if (caching_ctl)
list_del_init(&caching_ctl->list);
- up_write(&root->fs_info->commit_root_sem);
+ up_write(&fs_info->commit_root_sem);
if (caching_ctl) {
/* Once for the caching bgs list and once for us. */
put_caching_control(caching_ctl);
@@ -10666,7 +10442,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&block_group->space_info->lock);
list_del_init(&block_group->ro_list);
- if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+ if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
WARN_ON(block_group->space_info->total_bytes
< block_group->key.offset);
WARN_ON(block_group->space_info->bytes_readonly
@@ -10682,7 +10458,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
memcpy(&key, &block_group->key, sizeof(key));
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
if (!list_empty(&em->list)) {
/* We're in the transaction->pending_chunks list. */
free_extent_map(em);
@@ -10730,14 +10506,14 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* sees the em, either in the pending_chunks list or in the
* pinned_chunks list.
*/
- list_move_tail(&em->list, &root->fs_info->pinned_chunks);
+ list_move_tail(&em->list, &fs_info->pinned_chunks);
}
spin_unlock(&block_group->lock);
if (remove_em) {
struct extent_map_tree *em_tree;
- em_tree = &root->fs_info->mapping_tree.map_tree;
+ em_tree = &fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
/*
* The em might be in the pending_chunks list, so make sure the
@@ -10750,9 +10526,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
free_extent_map(em);
}
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
- ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+ ret = remove_block_group_free_space(trans, fs_info, block_group);
if (ret)
goto out;
@@ -10820,7 +10596,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
- struct btrfs_root *root = fs_info->extent_root;
struct btrfs_trans_handle *trans;
int ret = 0;
@@ -10881,7 +10656,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
trans = btrfs_start_trans_remove_block_group(fs_info,
block_group->key.objectid);
if (IS_ERR(trans)) {
- btrfs_dec_block_group_ro(root, block_group);
+ btrfs_dec_block_group_ro(block_group);
ret = PTR_ERR(trans);
goto next;
}
@@ -10908,14 +10683,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- btrfs_dec_block_group_ro(root, block_group);
+ btrfs_dec_block_group_ro(block_group);
goto end_trans;
}
ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- btrfs_dec_block_group_ro(root, block_group);
+ btrfs_dec_block_group_ro(block_group);
goto end_trans;
}
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
@@ -10934,7 +10709,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_unlock(&space_info->lock);
/* DISCARD can flip during remount */
- trimming = btrfs_test_opt(root->fs_info, DISCARD);
+ trimming = btrfs_test_opt(fs_info, DISCARD);
/* Implicit trim during transaction commit. */
if (trimming)
@@ -10944,7 +10719,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* btrfs_remove_chunk will abort the transaction if things go
* horribly wrong.
*/
- ret = btrfs_remove_chunk(trans, root,
+ ret = btrfs_remove_chunk(trans, fs_info,
block_group->key.objectid);
if (ret) {
@@ -10971,7 +10746,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
btrfs_get_block_group(block_group);
}
end_trans:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
next:
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
btrfs_put_block_group(block_group);
@@ -11018,9 +10793,10 @@ out:
return ret;
}
-int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
+ u64 start, u64 end)
{
- return unpin_extent_range(root, start, end, false);
+ return unpin_extent_range(fs_info, start, end, false);
}
/*
@@ -11060,7 +10836,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
ret = 0;
while (1) {
- struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_transaction *trans;
u64 bytes;
@@ -11110,9 +10886,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
return ret;
}
-int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_device *device;
struct list_head *devices;
@@ -11167,11 +10942,11 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
}
}
- cache = next_block_group(fs_info->tree_root, cache);
+ cache = next_block_group(fs_info, cache);
}
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- devices = &root->fs_info->fs_devices->alloc_list;
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ devices = &fs_info->fs_devices->alloc_list;
list_for_each_entry(device, devices, dev_alloc_list) {
ret = btrfs_trim_free_extents(device, range->minlen,
&group_trimmed);
@@ -11180,7 +10955,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
trimmed += group_trimmed;
}
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
range->len = trimmed;
return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1e67723c27a1..4ac383a3a649 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2029,7 +2029,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
* read repair operation.
*/
btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_block(fs_info, WRITE, logical,
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
&map_length, &bbio, mirror_num);
if (ret) {
btrfs_bio_counter_dec(fs_info);
@@ -2067,20 +2067,20 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
return 0;
}
-int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
- int mirror_num)
+int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int mirror_num)
{
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
+ if (fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
- ret = repair_io_failure(root->fs_info->btree_inode, start,
+ ret = repair_io_failure(fs_info->btree_inode, start,
PAGE_SIZE, start, p,
start - page_offset(p), mirror_num);
if (ret)
@@ -2341,6 +2341,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct page *page, int pg_offset, int icsum,
bio_end_io_t *endio_func, void *data)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio;
struct btrfs_io_bio *btrfs_failed_bio;
struct btrfs_io_bio *btrfs_bio;
@@ -2351,13 +2352,12 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9;
- bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ bio->bi_bdev = fs_info->fs_devices->latest_bdev;
bio->bi_iter.bi_size = 0;
bio->bi_private = data;
btrfs_failed_bio = btrfs_io_bio(failed_bio);
if (btrfs_failed_bio->csum) {
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
btrfs_bio = btrfs_io_bio(bio);
@@ -2474,6 +2474,8 @@ static void end_bio_extent_writepage(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+ struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
/* We always issue full-page reads, but if some block
* in a page fails to read, blk_update_request() will
@@ -2482,11 +2484,11 @@ static void end_bio_extent_writepage(struct bio *bio)
* if they don't add up to a full page. */
if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
- btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+ btrfs_err(fs_info,
"partial page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
else
- btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+ btrfs_info(fs_info,
"incomplete page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
}
@@ -3741,16 +3743,15 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (btrfs_header_level(eb) > 0) {
end = btrfs_node_key_ptr_offset(nritems);
- memset_extent_buffer(eb, 0, end, eb->len - end);
+ memzero_extent_buffer(eb, end, eb->len - end);
} else {
/*
* leaf:
* header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
*/
start = btrfs_item_nr_offset(nritems);
- end = btrfs_leaf_data(eb) +
- leaf_data_end(fs_info->tree_root, eb);
- memset_extent_buffer(eb, 0, start, end - start);
+ end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
+ memzero_extent_buffer(eb, start, end - start);
}
for (i = 0; i < num_pages; i++) {
@@ -4341,7 +4342,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
u64 last,
get_extent_t *get_extent)
{
- u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+ u64 sectorsize = btrfs_inode_sectorsize(inode);
struct extent_map *em;
u64 len;
@@ -4402,8 +4403,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
- start = round_down(start, BTRFS_I(inode)->root->sectorsize);
- len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
+ start = round_down(start, btrfs_inode_sectorsize(inode));
+ len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
/*
* lookup the last file extent. We're not using i_size here
@@ -4537,7 +4538,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
root->objectid,
btrfs_ino(inode), bytenr);
if (trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret < 0)
goto out_free;
if (ret)
@@ -4718,9 +4719,9 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
WARN_ON(PageDirty(p));
SetPageUptodate(p);
new->pages[i] = p;
+ copy_page(page_address(p), page_address(src->pages[i]));
}
- copy_extent_buffer(new, src, 0, 0, src->len);
set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
@@ -4758,21 +4759,9 @@ err:
}
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, u32 nodesize)
+ u64 start)
{
- unsigned long len;
-
- if (!fs_info) {
- /*
- * Called only from tests that don't always have a fs_info
- * available
- */
- len = nodesize;
- } else {
- len = fs_info->tree_root->nodesize;
- }
-
- return __alloc_dummy_extent_buffer(fs_info, start, len);
+ return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
}
static void check_buffer_tree_ref(struct extent_buffer *eb)
@@ -4863,7 +4852,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, u32 nodesize)
+ u64 start)
{
struct extent_buffer *eb, *exists = NULL;
int ret;
@@ -4871,7 +4860,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
- eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
+ eb = alloc_dummy_extent_buffer(fs_info, start);
if (!eb)
return NULL;
eb->fs_info = fs_info;
@@ -4911,7 +4900,7 @@ free_eb:
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
- unsigned long len = fs_info->tree_root->nodesize;
+ unsigned long len = fs_info->nodesize;
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
unsigned long index = start >> PAGE_SHIFT;
@@ -4922,7 +4911,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
int uptodate = 1;
int ret;
- if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+ if (!IS_ALIGNED(start, fs_info->sectorsize)) {
btrfs_err(fs_info, "bad tree block start %llu", start);
return ERR_PTR(-EINVAL);
}
@@ -5463,6 +5452,27 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
return ret;
}
+void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+ const void *srcv)
+{
+ char *kaddr;
+
+ WARN_ON(!PageUptodate(eb->pages[0]));
+ kaddr = page_address(eb->pages[0]);
+ memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
+ BTRFS_FSID_SIZE);
+}
+
+void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
+{
+ char *kaddr;
+
+ WARN_ON(!PageUptodate(eb->pages[0]));
+ kaddr = page_address(eb->pages[0]);
+ memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
+ BTRFS_FSID_SIZE);
+}
+
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
unsigned long start, unsigned long len)
{
@@ -5494,8 +5504,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
}
}
-void memset_extent_buffer(struct extent_buffer *eb, char c,
- unsigned long start, unsigned long len)
+void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
+ unsigned long len)
{
size_t cur;
size_t offset;
@@ -5515,7 +5525,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
cur = min(len, PAGE_SIZE - offset);
kaddr = page_address(page);
- memset(kaddr + offset, c, cur);
+ memset(kaddr + offset, 0, cur);
len -= cur;
offset = 0;
@@ -5523,6 +5533,20 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
}
}
+void copy_extent_buffer_full(struct extent_buffer *dst,
+ struct extent_buffer *src)
+{
+ int i;
+ unsigned num_pages;
+
+ ASSERT(dst->len == src->len);
+
+ num_pages = num_extent_pages(dst->start, dst->len);
+ for (i = 0; i < num_pages; i++)
+ copy_page(page_address(dst->pages[i]),
+ page_address(src->pages[i]));
+}
+
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
@@ -5764,6 +5788,7 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len)
{
+ struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
@@ -5772,13 +5797,13 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
- btrfs_err(dst->fs_info,
+ btrfs_err(fs_info,
"memmove bogus src_offset %lu move len %lu dst len %lu",
src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
- btrfs_err(dst->fs_info,
+ btrfs_err(fs_info,
"memmove bogus dst_offset %lu move len %lu dst len %lu",
dst_offset, len, dst->len);
BUG_ON(1);
@@ -5810,6 +5835,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len)
{
+ struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
@@ -5820,13 +5846,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
- btrfs_err(dst->fs_info,
+ btrfs_err(fs_info,
"memmove bogus src_offset %lu move len %lu len %lu",
src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
- btrfs_err(dst->fs_info,
+ btrfs_err(fs_info,
"memmove bogus dst_offset %lu move len %lu len %lu",
dst_offset, len, dst->len);
BUG_ON(1);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index ab31d145227e..17f9ce479ed7 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -371,7 +371,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, u32 nodesize);
+ u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
@@ -405,8 +405,13 @@ void read_extent_buffer(struct extent_buffer *eb, void *dst,
int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
unsigned long start,
unsigned long len);
+void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
+void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+ const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
+void copy_extent_buffer_full(struct extent_buffer *dst,
+ struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len);
@@ -414,8 +419,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
-void memset_extent_buffer(struct extent_buffer *eb, char c,
- unsigned long start, unsigned long len);
+void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
+ unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
@@ -452,8 +457,8 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
-int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
- int mirror_num);
+int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int mirror_num);
/*
* When IO fails, either with EIO or a csum verification failure, we
@@ -491,5 +496,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start, u32 nodesize);
+ u64 start);
#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index d0d571c47d33..e97e322c28f0 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -34,9 +34,9 @@
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
PAGE_SIZE))
-#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
+#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
sizeof(struct btrfs_ordered_sum)) / \
- sizeof(u32) * (r)->sectorsize)
+ sizeof(u32) * (fs_info)->sectorsize)
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -90,13 +90,14 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, int cow)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_key file_key;
struct btrfs_key found_key;
struct btrfs_csum_item *item;
struct extent_buffer *leaf;
u64 csum_offset = 0;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
int csums_in_item;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -116,7 +117,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
goto fail;
csum_offset = (bytenr - found_key.offset) >>
- root->fs_info->sb->s_blocksize_bits;
+ fs_info->sb->s_blocksize_bits;
csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
csums_in_item /= csum_size;
@@ -159,11 +160,11 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
kfree(bio->csum_allocated);
}
-static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
- struct inode *inode, struct bio *bio,
+static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio)
{
- struct bio_vec *bvec = bio->bi_io_vec;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct bio_vec *bvec;
struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
struct btrfs_csum_item *item = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -176,9 +177,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
u64 page_bytes_left;
u32 diff;
int nblocks;
- int bio_index = 0;
- int count;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ int count = 0, i;
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
path = btrfs_alloc_path();
if (!path)
@@ -223,8 +223,11 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (dio)
offset = logical_offset;
- page_bytes_left = bvec->bv_len;
- while (bio_index < bio->bi_vcnt) {
+ bio_for_each_segment_all(bvec, bio, i) {
+ page_bytes_left = bvec->bv_len;
+ if (count)
+ goto next;
+
if (!dio)
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
@@ -239,7 +242,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (item)
btrfs_release_path(path);
- item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
+ item = btrfs_lookup_csum(NULL, fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
count = 1;
@@ -247,10 +250,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (BTRFS_I(inode)->root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
set_extent_bits(io_tree, offset,
- offset + root->sectorsize - 1,
+ offset + fs_info->sectorsize - 1,
EXTENT_NODATASUM);
} else {
- btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
+ btrfs_info_rl(fs_info,
"no csum found for inode %llu start %llu",
btrfs_ino(inode), offset);
}
@@ -266,7 +269,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
path->slots[0]);
item_last_offset = item_start_offset +
(item_size / csum_size) *
- root->sectorsize;
+ fs_info->sectorsize;
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_csum_item);
}
@@ -275,7 +278,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
* a single leaf so it will also fit inside a u32
*/
diff = disk_bytenr - item_start_offset;
- diff = diff / root->sectorsize;
+ diff = diff / fs_info->sectorsize;
diff = diff * csum_size;
count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
inode->i_sb->s_blocksize_bits);
@@ -285,48 +288,35 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
found:
csum += count * csum_size;
nblocks -= count;
-
+next:
while (count--) {
- disk_bytenr += root->sectorsize;
- offset += root->sectorsize;
- page_bytes_left -= root->sectorsize;
- if (!page_bytes_left) {
- bio_index++;
- /*
- * make sure we're still inside the
- * bio before we update page_bytes_left
- */
- if (bio_index >= bio->bi_vcnt) {
- WARN_ON_ONCE(count);
- goto done;
- }
- bvec++;
- page_bytes_left = bvec->bv_len;
- }
-
+ disk_bytenr += fs_info->sectorsize;
+ offset += fs_info->sectorsize;
+ page_bytes_left -= fs_info->sectorsize;
+ if (!page_bytes_left)
+ break; /* move to next bio */
}
}
-done:
+ WARN_ON_ONCE(count);
btrfs_free_path(path);
return 0;
}
-int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u32 *dst)
+int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
- return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
+ return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}
-int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 offset)
+int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
- return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
+ return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -337,10 +327,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
int ret;
size_t size;
u64 csum_end;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
- ASSERT(IS_ALIGNED(start, root->sectorsize) &&
- IS_ALIGNED(end + 1, root->sectorsize));
+ ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+ IS_ALIGNED(end + 1, fs_info->sectorsize));
path = btrfs_alloc_path();
if (!path)
@@ -365,7 +355,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
key.type == BTRFS_EXTENT_CSUM_KEY) {
offset = (start - key.offset) >>
- root->fs_info->sb->s_blocksize_bits;
+ fs_info->sb->s_blocksize_bits;
if (offset * csum_size <
btrfs_item_size_nr(leaf, path->slots[0] - 1))
path->slots[0]--;
@@ -393,7 +383,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
start = key.offset;
size = btrfs_item_size_nr(leaf, path->slots[0]);
- csum_end = key.offset + (size / csum_size) * root->sectorsize;
+ csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
if (csum_end <= start) {
path->slots[0]++;
continue;
@@ -404,8 +394,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct btrfs_csum_item);
while (start < csum_end) {
size = min_t(size_t, csum_end - start,
- MAX_ORDERED_SUM_BYTES(root));
- sums = kzalloc(btrfs_ordered_sum_size(root, size),
+ MAX_ORDERED_SUM_BYTES(fs_info));
+ sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
GFP_NOFS);
if (!sums) {
ret = -ENOMEM;
@@ -416,16 +406,16 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
sums->len = (int)size;
offset = (start - key.offset) >>
- root->fs_info->sb->s_blocksize_bits;
+ fs_info->sb->s_blocksize_bits;
offset *= csum_size;
- size >>= root->fs_info->sb->s_blocksize_bits;
+ size >>= fs_info->sb->s_blocksize_bits;
read_extent_buffer(path->nodes[0],
sums->sums,
((unsigned long)item) + offset,
csum_size * size);
- start += root->sectorsize * size;
+ start += fs_info->sectorsize * size;
list_add_tail(&sums->list, &tmplist);
}
path->slots[0]++;
@@ -443,23 +433,23 @@ fail:
return ret;
}
-int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 file_start, int contig)
+int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+ u64 file_start, int contig)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums;
- struct btrfs_ordered_extent *ordered;
+ struct btrfs_ordered_extent *ordered = NULL;
char *data;
- struct bio_vec *bvec = bio->bi_io_vec;
- int bio_index = 0;
+ struct bio_vec *bvec;
int index;
int nr_sectors;
- int i;
+ int i, j;
unsigned long total_bytes = 0;
unsigned long this_sum_bytes = 0;
u64 offset;
WARN_ON(bio->bi_vcnt <= 0);
- sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+ sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
GFP_NOFS);
if (!sums)
return -ENOMEM;
@@ -470,22 +460,25 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
if (contig)
offset = file_start;
else
- offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+ offset = 0; /* shut up gcc */
- ordered = btrfs_lookup_ordered_extent(inode, offset);
- BUG_ON(!ordered); /* Logic error */
sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
index = 0;
- while (bio_index < bio->bi_vcnt) {
+ bio_for_each_segment_all(bvec, bio, j) {
if (!contig)
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+ if (!ordered) {
+ ordered = btrfs_lookup_ordered_extent(inode, offset);
+ BUG_ON(!ordered); /* Logic error */
+ }
+
data = kmap_atomic(bvec->bv_page);
- nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- bvec->bv_len + root->sectorsize
- - 1);
+ nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
+ bvec->bv_len + fs_info->sectorsize
+ - 1);
for (i = 0; i < nr_sectors; i++) {
if (offset >= ordered->file_offset + ordered->len ||
@@ -500,8 +493,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
bytes_left = bio->bi_iter.bi_size - total_bytes;
- sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
- GFP_NOFS);
+ sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
+ GFP_NOFS);
BUG_ON(!sums); /* -ENOMEM */
sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode,
@@ -517,21 +510,18 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums->sums[index] = ~(u32)0;
sums->sums[index]
= btrfs_csum_data(data + bvec->bv_offset
- + (i * root->sectorsize),
+ + (i * fs_info->sectorsize),
sums->sums[index],
- root->sectorsize);
+ fs_info->sectorsize);
btrfs_csum_final(sums->sums[index],
(char *)(sums->sums + index));
index++;
- offset += root->sectorsize;
- this_sum_bytes += root->sectorsize;
- total_bytes += root->sectorsize;
+ offset += fs_info->sectorsize;
+ this_sum_bytes += fs_info->sectorsize;
+ total_bytes += fs_info->sectorsize;
}
kunmap_atomic(data);
-
- bio_index++;
- bvec++;
}
this_sum_bytes = 0;
btrfs_add_ordered_sum(inode, ordered, sums);
@@ -550,20 +540,20 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
* This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required.
*/
-static noinline void truncate_one_csum(struct btrfs_root *root,
+static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
struct extent_buffer *leaf;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
u64 csum_end;
u64 end_byte = bytenr + len;
- u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
+ u32 blocksize_bits = fs_info->sb->s_blocksize_bits;
leaf = path->nodes[0];
csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
- csum_end <<= root->fs_info->sb->s_blocksize_bits;
+ csum_end <<= fs_info->sb->s_blocksize_bits;
csum_end += key->offset;
if (key->offset < bytenr && csum_end <= end_byte) {
@@ -575,7 +565,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(root, path, new_size, 1);
+ btrfs_truncate_item(fs_info, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@@ -587,10 +577,10 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(root, path, new_size, 0);
+ btrfs_truncate_item(fs_info, path, new_size, 0);
key->offset = end_byte;
- btrfs_set_item_key_safe(root->fs_info, path, key);
+ btrfs_set_item_key_safe(fs_info, path, key);
} else {
BUG();
}
@@ -601,18 +591,17 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
* range of bytes.
*/
int btrfs_del_csums(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr, u64 len)
+ struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
+ struct btrfs_root *root = fs_info->csum_root;
struct btrfs_path *path;
struct btrfs_key key;
u64 end_byte = bytenr + len;
u64 csum_end;
struct extent_buffer *leaf;
int ret;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
- int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
-
- root = root->fs_info->csum_root;
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ int blocksize_bits = fs_info->sb->s_blocksize_bits;
path = btrfs_alloc_path();
if (!path)
@@ -689,7 +678,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
item_offset = btrfs_item_ptr_offset(leaf,
path->slots[0]);
- memset_extent_buffer(leaf, 0, item_offset + offset,
+ memzero_extent_buffer(leaf, item_offset + offset,
shift_len);
key.offset = bytenr;
@@ -705,7 +694,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
} else {
- truncate_one_csum(root, path, &key, bytenr, len);
+ truncate_one_csum(fs_info, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
@@ -721,6 +710,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key file_key;
struct btrfs_key found_key;
struct btrfs_path *path;
@@ -736,7 +726,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
int index = 0;
int found_next;
int ret;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
path = btrfs_alloc_path();
if (!path)
@@ -769,7 +759,7 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
if ((item_size / csum_size) >=
- MAX_CSUM_ITEMS(root, csum_size)) {
+ MAX_CSUM_ITEMS(fs_info, csum_size)) {
/* already at max size, make a new one */
goto insert;
}
@@ -815,11 +805,11 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
csum_offset = (bytenr - found_key.offset) >>
- root->fs_info->sb->s_blocksize_bits;
+ fs_info->sb->s_blocksize_bits;
if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
- csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
+ csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
goto insert;
}
@@ -830,26 +820,27 @@ again:
u32 diff;
u32 free_space;
- if (btrfs_leaf_free_space(root, leaf) <
+ if (btrfs_leaf_free_space(fs_info, leaf) <
sizeof(struct btrfs_item) + csum_size * 2)
goto insert;
- free_space = btrfs_leaf_free_space(root, leaf) -
+ free_space = btrfs_leaf_free_space(fs_info, leaf) -
sizeof(struct btrfs_item) - csum_size;
tmp = sums->len - total_bytes;
- tmp >>= root->fs_info->sb->s_blocksize_bits;
+ tmp >>= fs_info->sb->s_blocksize_bits;
WARN_ON(tmp < 1);
extend_nr = max_t(int, 1, (int)tmp);
diff = (csum_offset + extend_nr) * csum_size;
- diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
+ diff = min(diff,
+ MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
diff = min(free_space, diff);
diff /= csum_size;
diff *= csum_size;
- btrfs_extend_item(root, path, diff);
+ btrfs_extend_item(fs_info, path, diff);
ret = 0;
goto csum;
}
@@ -861,12 +852,12 @@ insert:
u64 tmp;
tmp = sums->len - total_bytes;
- tmp >>= root->fs_info->sb->s_blocksize_bits;
+ tmp >>= fs_info->sb->s_blocksize_bits;
tmp = min(tmp, (next_offset - file_key.offset) >>
- root->fs_info->sb->s_blocksize_bits);
+ fs_info->sb->s_blocksize_bits);
tmp = max((u64)1, tmp);
- tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
+ tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size));
ins_size = csum_size * tmp;
} else {
ins_size = csum_size;
@@ -888,7 +879,7 @@ csum:
csum_offset * csum_size);
found:
ins_size = (u32)(sums->len - total_bytes) >>
- root->fs_info->sb->s_blocksize_bits;
+ fs_info->sb->s_blocksize_bits;
ins_size *= csum_size;
ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
ins_size);
@@ -896,7 +887,7 @@ found:
ins_size);
ins_size /= csum_size;
- total_bytes += ins_size * root->sectorsize;
+ total_bytes += ins_size * fs_info->sectorsize;
index += ins_size;
btrfs_mark_buffer_dirty(path->nodes[0]);
@@ -919,6 +910,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
const bool new_inline,
struct extent_map *em)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0];
@@ -928,7 +920,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset;
@@ -939,7 +931,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, slot, fi);
- extent_end = ALIGN(extent_start + size, root->sectorsize);
+ extent_end = ALIGN(extent_start + size,
+ fs_info->sectorsize);
}
em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
@@ -982,7 +975,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
em->compress_type = compress_type;
}
} else {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"unknown file extent item type %d, inode %llu, offset %llu, root %llu",
type, btrfs_ino(inode), extent_start,
root->root_key.objectid);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3a14c87d9c92..448f57d108d1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -27,7 +27,6 @@
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
@@ -96,13 +95,13 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
static int __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
int ret;
- p = &root->fs_info->defrag_inodes.rb_node;
+ p = &fs_info->defrag_inodes.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
@@ -126,16 +125,16 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
}
set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p);
- rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
+ rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0;
}
-static inline int __need_auto_defrag(struct btrfs_root *root)
+static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
- if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
+ if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
- if (btrfs_fs_closing(root->fs_info))
+ if (btrfs_fs_closing(fs_info))
return 0;
return 1;
@@ -148,12 +147,13 @@ static inline int __need_auto_defrag(struct btrfs_root *root)
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag;
u64 transid;
int ret;
- if (!__need_auto_defrag(root))
+ if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
@@ -172,7 +172,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->transid = transid;
defrag->root = root->root_key.objectid;
- spin_lock(&root->fs_info->defrag_inodes_lock);
+ spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
/*
* If we set the IN_DEFRAG flag and evict the inode from memory,
@@ -185,7 +185,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
- spin_unlock(&root->fs_info->defrag_inodes_lock);
+ spin_unlock(&fs_info->defrag_inodes_lock);
return 0;
}
@@ -197,19 +197,19 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
static void btrfs_requeue_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
- if (!__need_auto_defrag(root))
+ if (!__need_auto_defrag(fs_info))
goto out;
/*
* Here we don't check the IN_DEFRAG flag, because we need to merge
* them together.
*/
- spin_lock(&root->fs_info->defrag_inodes_lock);
+ spin_lock(&fs_info->defrag_inodes_lock);
ret = __btrfs_add_inode_defrag(inode, defrag);
- spin_unlock(&root->fs_info->defrag_inodes_lock);
+ spin_unlock(&fs_info->defrag_inodes_lock);
if (ret)
goto out;
return;
@@ -373,7 +373,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
&fs_info->fs_state))
break;
- if (!__need_auto_defrag(fs_info->tree_root))
+ if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
@@ -485,11 +485,11 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
- struct extent_state **cached)
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+ size_t num_pages, loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int err = 0;
int i;
u64 num_bytes;
@@ -498,8 +498,9 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(inode);
- start_pos = pos & ~((u64)root->sectorsize - 1);
- num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
+ start_pos = pos & ~((u64) fs_info->sectorsize - 1);
+ num_bytes = round_up(write_bytes + pos - start_pos,
+ fs_info->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -696,6 +697,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
u32 extent_item_size,
int *key_inserted)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
@@ -706,6 +708,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
u64 num_bytes = 0;
u64 extent_offset = 0;
u64 extent_end = 0;
+ u64 last_end = start;
int del_nr = 0;
int del_slot = 0;
int extent_type;
@@ -723,7 +726,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
modify_tree = 0;
update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == root->fs_info->tree_root);
+ root == fs_info->tree_root);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -797,8 +800,10 @@ next_slot:
* extent item in the call to setup_items_for_insert() later
* in this function.
*/
- if (extent_end == key.offset && extent_end >= search_start)
+ if (extent_end == key.offset && extent_end >= search_start) {
+ last_end = extent_end;
goto delete_extent_item;
+ }
if (extent_end <= search_start) {
path->slots[0]++;
@@ -851,7 +856,7 @@ next_slot:
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
@@ -861,6 +866,12 @@ next_slot:
key.offset = start;
}
/*
+ * From here on out we will have actually dropped something, so
+ * last_end can be updated.
+ */
+ last_end = extent_end;
+
+ /*
* | ---- range to drop ----- |
* | -------- extent -------- |
*/
@@ -872,7 +883,7 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = end;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+ btrfs_set_item_key_safe(fs_info, path, &new_key);
extent_offset += end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@@ -927,9 +938,9 @@ delete_extent_item:
inode_sub_bytes(inode,
extent_end - key.offset);
extent_end = ALIGN(extent_end,
- root->sectorsize);
+ fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
- ret = btrfs_free_extent(trans, root,
+ ret = btrfs_free_extent(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
key.objectid, key.offset -
@@ -986,7 +997,7 @@ delete_extent_item:
if (!ret && replace_extent && leafs_visited == 1 &&
(path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
path->locks[0] == BTRFS_WRITE_LOCK) &&
- btrfs_leaf_free_space(root, leaf) >=
+ btrfs_leaf_free_space(fs_info, leaf) >=
sizeof(struct btrfs_item) + extent_item_size) {
key.objectid = ino;
@@ -1010,7 +1021,7 @@ delete_extent_item:
if (!replace_extent || !(*key_inserted))
btrfs_release_path(path);
if (drop_end)
- *drop_end = found ? min(end, extent_end) : end;
+ *drop_end = found ? min(end, last_end) : end;
return ret;
}
@@ -1073,6 +1084,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
@@ -1142,7 +1154,7 @@ again:
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+ btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
@@ -1176,7 +1188,7 @@ again:
trans->transid);
path->slots[0]++;
new_key.offset = start;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+ btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -1222,8 +1234,8 @@ again:
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
- root->root_key.objectid,
+ ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
+ 0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1256,7 +1268,7 @@ again:
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@@ -1276,7 +1288,7 @@ again:
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@@ -1409,15 +1421,16 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
u64 *lockstart, u64 *lockend,
struct extent_state **cached_state)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 start_pos;
u64 last_pos;
int i;
int ret = 0;
- start_pos = round_down(pos, root->sectorsize);
+ start_pos = round_down(pos, fs_info->sectorsize);
last_pos = start_pos
- + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
+ + round_up(pos + write_bytes - start_pos,
+ fs_info->sectorsize) - 1;
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
@@ -1464,6 +1477,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
size_t *write_bytes)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend;
@@ -1474,8 +1488,9 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
if (!ret)
return -ENOSPC;
- lockstart = round_down(pos, root->sectorsize);
- lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
+ lockstart = round_down(pos, fs_info->sectorsize);
+ lockend = round_up(pos + *write_bytes,
+ fs_info->sectorsize) - 1;
while (1) {
lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
@@ -1509,6 +1524,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
loff_t pos)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
struct extent_state *cached_state = NULL;
@@ -1555,9 +1571,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
break;
}
- sector_offset = pos & (root->sectorsize - 1);
+ sector_offset = pos & (fs_info->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
- root->sectorsize);
+ fs_info->sectorsize);
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
if (ret < 0) {
@@ -1577,7 +1593,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
PAGE_SIZE);
reserve_bytes = round_up(write_bytes +
sector_offset,
- root->sectorsize);
+ fs_info->sectorsize);
} else {
break;
}
@@ -1621,12 +1637,10 @@ again:
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
- num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- reserve_bytes);
+ num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
dirty_sectors = round_up(copied + sector_offset,
- root->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- dirty_sectors);
+ fs_info->sectorsize);
+ dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
/*
* if we have trouble faulting in the pages, fall
@@ -1654,11 +1668,9 @@ again:
* managed to copy.
*/
if (num_sectors > dirty_sectors) {
-
/* release everything except the sectors we dirtied */
release_bytes -= dirty_sectors <<
- root->fs_info->sb->s_blocksize_bits;
-
+ fs_info->sb->s_blocksize_bits;
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
@@ -1670,7 +1682,8 @@ again:
} else {
u64 __pos;
- __pos = round_down(pos, root->sectorsize) +
+ __pos = round_down(pos,
+ fs_info->sectorsize) +
(dirty_pages << PAGE_SHIFT);
btrfs_delalloc_release_space(inode, __pos,
release_bytes);
@@ -1678,12 +1691,11 @@ again:
}
release_bytes = round_up(copied + sector_offset,
- root->sectorsize);
+ fs_info->sectorsize);
if (copied > 0)
- ret = btrfs_dirty_pages(root, inode, pages,
- dirty_pages, pos, copied,
- NULL);
+ ret = btrfs_dirty_pages(inode, pages, dirty_pages,
+ pos, copied, NULL);
if (need_unlock)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state,
@@ -1698,8 +1710,10 @@ again:
btrfs_end_write_no_snapshoting(root);
if (only_release_metadata && copied > 0) {
- lockstart = round_down(pos, root->sectorsize);
- lockend = round_up(pos + copied, root->sectorsize) - 1;
+ lockstart = round_down(pos,
+ fs_info->sectorsize);
+ lockend = round_up(pos + copied,
+ fs_info->sectorsize) - 1;
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
@@ -1712,8 +1726,8 @@ again:
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root);
+ if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
+ btrfs_btree_balance_dirty(fs_info);
pos += copied;
num_written += copied;
@@ -1727,7 +1741,7 @@ again:
btrfs_delalloc_release_metadata(inode, release_bytes);
} else {
btrfs_delalloc_release_space(inode,
- round_down(pos, root->sectorsize),
+ round_down(pos, fs_info->sectorsize),
release_bytes);
}
}
@@ -1798,6 +1812,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start_pos;
u64 end_pos;
@@ -1829,7 +1844,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
* although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency.
*/
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
inode_unlock(inode);
err = -EROFS;
goto out;
@@ -1845,17 +1860,18 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
pos = iocb->ki_pos;
count = iov_iter_count(from);
- start_pos = round_down(pos, root->sectorsize);
+ start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
if (start_pos > oldsize) {
/* Expand hole size to cover write data, preventing empty gap */
- end_pos = round_up(pos + count, root->sectorsize);
+ end_pos = round_up(pos + count,
+ fs_info->sectorsize);
err = btrfs_cont_expand(inode, oldsize, end_pos);
if (err) {
inode_unlock(inode);
goto out;
}
- if (start_pos > round_up(oldsize, root->sectorsize))
+ if (start_pos > round_up(oldsize, fs_info->sectorsize))
clean_page = 1;
}
@@ -1935,6 +1951,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
@@ -2045,12 +2062,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* commit does not start nor waits for ordered extents to complete.
*/
smp_mb();
- if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+ if (btrfs_inode_in_log(inode, fs_info->generation) ||
(full_sync && BTRFS_I(inode)->last_trans <=
- root->fs_info->last_trans_committed) ||
+ fs_info->last_trans_committed) ||
(!btrfs_have_ordered_extents_in_range(inode, start, len) &&
BTRFS_I(inode)->last_trans
- <= root->fs_info->last_trans_committed)) {
+ <= fs_info->last_trans_committed)) {
/*
* We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
@@ -2129,7 +2146,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* which are indicated by ctx.io_err.
*/
if (ctx.io_err) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
ret = ctx.io_err;
goto out;
}
@@ -2138,20 +2155,20 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!ret) {
ret = btrfs_sync_log(trans, root, &ctx);
if (!ret) {
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
goto out;
}
}
if (!full_sync) {
ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
} else {
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
}
out:
return ret > 0 ? -EIO : ret;
@@ -2208,6 +2225,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_path *path, u64 offset, u64 end)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
@@ -2216,7 +2234,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_key key;
int ret;
- if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ if (btrfs_fs_incompat(fs_info, NO_HOLES))
goto out;
key.objectid = btrfs_ino(inode);
@@ -2224,9 +2242,15 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
key.offset = offset;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- if (ret < 0)
+ if (ret <= 0) {
+ /*
+ * We should have dropped this offset, so if we find it then
+ * something has gone horribly wrong.
+ */
+ if (ret == 0)
+ ret = -EINVAL;
return ret;
- BUG_ON(!ret);
+ }
leaf = path->nodes[0];
if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
@@ -2248,7 +2272,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
u64 num_bytes;
key.offset = offset;
- btrfs_set_item_key_safe(root->fs_info, path, &key);
+ btrfs_set_item_key_safe(fs_info, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@ -2284,7 +2308,7 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
- hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+ hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
@@ -2336,6 +2360,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
@@ -2347,13 +2372,13 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
u64 tail_len;
u64 orig_start = offset;
u64 cur_offset;
- u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+ u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
u64 drop_end;
int ret = 0;
int err = 0;
unsigned int rsv_count;
bool same_block;
- bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
+ bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
u64 ino_size;
bool truncated_block = false;
bool updated_inode = false;
@@ -2363,7 +2388,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret;
inode_lock(inode);
- ino_size = round_up(inode->i_size, root->sectorsize);
+ ino_size = round_up(inode->i_size, fs_info->sectorsize);
ret = find_first_non_hole(inode, &offset, &len);
if (ret < 0)
goto out_only_mutex;
@@ -2373,11 +2398,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
- lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
+ lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
lockend = round_down(offset + len,
- BTRFS_I(inode)->root->sectorsize) - 1;
- same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
- == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
+ btrfs_inode_sectorsize(inode)) - 1;
+ same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
+ == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
/*
* We needn't truncate any block which is beyond the end of the file
* because we are sure there is no data there.
@@ -2386,7 +2411,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
* Only do this if we are in the same block and we aren't doing the
* entire block.
*/
- if (same_block && len < root->sectorsize) {
+ if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) {
truncated_block = true;
ret = btrfs_truncate_block(inode, offset, len, 0);
@@ -2489,12 +2514,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out;
}
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
ret = -ENOMEM;
goto out_free;
}
- rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
+ rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
rsv->failfast = 1;
/*
@@ -2509,7 +2534,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_free;
}
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
min_size, 0);
BUG_ON(ret);
trans->block_rsv = rsv;
@@ -2523,12 +2548,19 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret != -ENOSPC)
break;
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
- if (cur_offset < ino_size) {
+ if (cur_offset < drop_end && cur_offset < ino_size) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_end);
if (ret) {
+ /*
+ * If we failed then we didn't insert our hole
+ * entries for the area we dropped, leaving the
+ * fs corrupted, so we must abort the
+ * transaction.
+ */
+ btrfs_abort_transaction(trans, ret);
err = ret;
break;
}
@@ -2542,8 +2574,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
break;
}
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, rsv_count);
if (IS_ERR(trans)) {
@@ -2552,7 +2584,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
break;
}
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
@@ -2571,7 +2603,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_trans;
}
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
/*
* If we are using the NO_HOLES feature we might already have had a
* hole that overlaps a part of the region [lockstart, lockend] and
@@ -2593,6 +2625,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (cur_offset < ino_size && cur_offset < drop_end) {
ret = fill_holes(trans, inode, path, cur_offset, drop_end);
if (ret) {
+ /* Same comment as above. */
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_trans;
}
@@ -2605,14 +2639,14 @@ out_trans:
inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
out_free:
btrfs_free_path(path);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
@@ -2630,7 +2664,7 @@ out_only_mutex:
err = PTR_ERR(trans);
} else {
err = btrfs_update_inode(trans, root, inode);
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
}
}
inode_unlock(inode);
@@ -2695,7 +2729,7 @@ static long btrfs_fallocate(struct file *file, int mode,
u64 locked_end;
u64 actual_end = 0;
struct extent_map *em;
- int blocksize = BTRFS_I(inode)->root->sectorsize;
+ int blocksize = btrfs_inode_sectorsize(inode);
int ret;
alloc_start = round_down(offset, blocksize);
@@ -2872,9 +2906,9 @@ static long btrfs_fallocate(struct file *file, int mode,
btrfs_ordered_update_i_size(inode, actual_end, NULL);
ret = btrfs_update_inode(trans, root, inode);
if (ret)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
else
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
}
}
out_unlock:
@@ -2891,7 +2925,7 @@ out:
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 lockstart;
@@ -2909,10 +2943,11 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
*/
start = max_t(loff_t, 0, *offset);
- lockstart = round_down(start, root->sectorsize);
- lockend = round_up(i_size_read(inode), root->sectorsize);
+ lockstart = round_down(start, fs_info->sectorsize);
+ lockend = round_up(i_size_read(inode),
+ fs_info->sectorsize);
if (lockend <= lockstart)
- lockend = lockstart + root->sectorsize;
+ lockend = lockstart + fs_info->sectorsize;
lockend--;
len = lockend - lockstart + 1;
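Among the file.c changes, the last_end additions in __btrfs_drop_extents are the substantive fix: drop_end used to be derived from extent_end even when that extent was never actually dropped, so a caller like btrfs_punch_hole could fill holes over a range that still held data after an early -ENOSPC exit. A userspace model of the corrected bookkeeping; the extent walk and the error path are simplified assumptions, not the kernel loop:

#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, end; };

/* Returns the drop_end the caller may safely fill holes up to. */
static uint64_t drop_extents(const struct extent *ext, int nr, int budget,
                             uint64_t start, uint64_t end, int *found)
{
        uint64_t last_end = start;      /* nothing dropped yet */

        *found = 0;
        for (int i = 0; i < nr && ext[i].start < end; i++) {
                if (budget-- == 0)      /* models hitting -ENOSPC mid-range */
                        break;
                *found = 1;
                last_end = ext[i].end;  /* updated only after a real drop */
        }
        return *found ? (end < last_end ? end : last_end) : end;
}

int main(void)
{
        struct extent e[] = { {0, 4096}, {4096, 8192}, {8192, 12288} };
        int found;

        /* Budget of 2: the walk stops after two extents, so drop_end must
         * be 8192, not the full requested end of 12288. */
        printf("%llu\n",
               (unsigned long long)drop_extents(e, 3, 2, 0, 12288, &found));
        return 0;
}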
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index e4b48f377d3a..7015892c9ee8 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -42,11 +42,16 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
+static int btrfs_wait_cache_io_root(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_io_ctl *io_ctl,
+ struct btrfs_path *path);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path,
u64 offset)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
@@ -74,9 +79,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(path);
- inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
- if (!inode)
- return ERR_PTR(-ENOENT);
+ inode = btrfs_iget(fs_info->sb, &location, root, NULL);
if (IS_ERR(inode))
return inode;
if (is_bad_inode(inode)) {
@@ -96,6 +99,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
*block_group, struct btrfs_path *path)
{
struct inode *inode = NULL;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
spin_lock(&block_group->lock);
@@ -112,8 +116,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
spin_lock(&block_group->lock);
if (!((BTRFS_I(inode)->flags & flags) == flags)) {
- btrfs_info(root->fs_info,
- "Old style space inode found, converting.");
+ btrfs_info(fs_info, "Old style space inode found, converting.");
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
BTRFS_INODE_NODATACOW;
block_group->disk_cache_state = BTRFS_DC_CLEAR;
@@ -153,7 +156,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
btrfs_item_key(leaf, &disk_key, path->slots[0]);
- memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
+ memzero_extent_buffer(leaf, (unsigned long)inode_item,
sizeof(*inode_item));
btrfs_set_inode_generation(leaf, inode_item, trans->transid);
btrfs_set_inode_size(leaf, inode_item, 0);
@@ -181,7 +184,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
leaf = path->nodes[0];
header = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_header);
- memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
+ memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
@@ -205,15 +208,15 @@ int create_free_space_inode(struct btrfs_root *root,
block_group->key.objectid);
}
-int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
u64 needed_bytes;
int ret;
/* 1 for slack space, 1 for updating the inode */
- needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
- btrfs_calc_trans_metadata_size(root, 1);
+ needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
+ btrfs_calc_trans_metadata_size(fs_info, 1);
spin_lock(&rsv->lock);
if (rsv->reserved < needed_bytes)
@@ -244,9 +247,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
if (!list_empty(&block_group->io_list)) {
list_del_init(&block_group->io_list);
- btrfs_wait_cache_io(root, trans, block_group,
- &block_group->io_ctl, path,
- block_group->key.objectid);
+ btrfs_wait_cache_io(trans, block_group, path);
btrfs_put_block_group(block_group);
}
@@ -305,7 +306,7 @@ static int readahead_cache(struct inode *inode)
}
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
- struct btrfs_root *root, int write)
+ int write)
{
int num_pages;
int check_crcs = 0;
@@ -327,7 +328,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
return -ENOMEM;
io_ctl->num_pages = num_pages;
- io_ctl->root = root;
+ io_ctl->fs_info = btrfs_sb(inode->i_sb);
io_ctl->check_crcs = check_crcs;
io_ctl->inode = inode;
@@ -450,7 +451,7 @@ static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
gen = io_ctl->cur;
if (le64_to_cpu(*gen) != generation) {
- btrfs_err_rl(io_ctl->root->fs_info,
+ btrfs_err_rl(io_ctl->fs_info,
"space cache generation (%llu) does not match inode (%llu)",
*gen, generation);
io_ctl_unmap_page(io_ctl);
@@ -476,7 +477,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_SIZE - offset);
- btrfs_csum_final(crc, (char *)&crc);
+ btrfs_csum_final(crc, (u8 *)&crc);
io_ctl_unmap_page(io_ctl);
tmp = page_address(io_ctl->pages[0]);
tmp += index;
@@ -504,9 +505,9 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
io_ctl_map_page(io_ctl, 0);
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_SIZE - offset);
- btrfs_csum_final(crc, (char *)&crc);
+ btrfs_csum_final(crc, (u8 *)&crc);
if (val != crc) {
- btrfs_err_rl(io_ctl->root->fs_info,
+ btrfs_err_rl(io_ctl->fs_info,
"csum mismatch on free space cache");
io_ctl_unmap_page(io_ctl);
return -EIO;
@@ -669,6 +670,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_path *path, u64 offset)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct btrfs_io_ctl io_ctl;
@@ -708,23 +710,23 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
btrfs_release_path(path);
if (!BTRFS_I(inode)->generation) {
- btrfs_info(root->fs_info,
+ btrfs_info(fs_info,
"The free space cache file (%llu) is invalid. skip it\n",
offset);
return 0;
}
if (BTRFS_I(inode)->generation != generation) {
- btrfs_err(root->fs_info,
- "free space inode generation (%llu) did not match free space cache generation (%llu)",
- BTRFS_I(inode)->generation, generation);
+ btrfs_err(fs_info,
+ "free space inode generation (%llu) did not match free space cache generation (%llu)",
+ BTRFS_I(inode)->generation, generation);
return 0;
}
if (!num_entries)
return 0;
- ret = io_ctl_init(&io_ctl, inode, root, 0);
+ ret = io_ctl_init(&io_ctl, inode, 0);
if (ret)
return ret;
@@ -766,7 +768,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock);
if (ret) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@@ -786,7 +788,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@@ -1033,7 +1035,7 @@ fail:
}
static noinline_for_stack int
-write_pinned_extent_entries(struct btrfs_root *root,
+write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
int *entries)
@@ -1052,7 +1054,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
* We shouldn't have switched the pinned extents yet so this is the
* right one
*/
- unpin = root->fs_info->pinned_extents;
+ unpin = fs_info->pinned_extents;
start = block_group->key.objectid;
@@ -1135,20 +1137,20 @@ cleanup_write_cache_enospc(struct inode *inode,
GFP_NOFS);
}
-int btrfs_wait_cache_io(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_io_ctl *io_ctl,
- struct btrfs_path *path, u64 offset)
+static int __btrfs_wait_cache_io(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_io_ctl *io_ctl,
+ struct btrfs_path *path, u64 offset)
{
int ret;
struct inode *inode = io_ctl->inode;
+ struct btrfs_fs_info *fs_info;
if (!inode)
return 0;
- if (block_group)
- root = root->fs_info->tree_root;
+ fs_info = btrfs_sb(inode->i_sb);
/* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode);
@@ -1165,9 +1167,9 @@ out:
BTRFS_I(inode)->generation = 0;
if (block_group) {
#ifdef DEBUG
- btrfs_err(root->fs_info,
- "failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ btrfs_err(fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
#endif
}
}
@@ -1200,6 +1202,23 @@ out:
}
+static int btrfs_wait_cache_io_root(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_io_ctl *io_ctl,
+ struct btrfs_path *path)
+{
+ return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
+}
+
+int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path)
+{
+ return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
+ block_group, &block_group->io_ctl,
+ path, block_group->key.objectid);
+}
+
/**
* __btrfs_write_out_cache - write out cached info to an inode
* @root - the root the inode belongs to
@@ -1220,6 +1239,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 offset)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_state *cached_state = NULL;
LIST_HEAD(bitmap_list);
int entries = 0;
@@ -1231,7 +1251,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
return -EIO;
WARN_ON(io_ctl->pages);
- ret = io_ctl_init(io_ctl, inode, root, 1);
+ ret = io_ctl_init(io_ctl, inode, 1);
if (ret)
return ret;
@@ -1277,7 +1297,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
* If this changes while we are working we'll get added back to
* the dirty list and redo it. No locking needed
*/
- ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
+ ret = write_pinned_extent_entries(fs_info, block_group,
+ io_ctl, &entries);
if (ret)
goto out_nospc_locked;
@@ -1296,8 +1317,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
- ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
- 0, i_size_read(inode), &cached_state);
+ ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
+ i_size_read(inode), &cached_state);
if (ret)
goto out_nospc;
@@ -1352,17 +1373,16 @@ out_nospc:
goto out;
}
-int btrfs_write_out_cache(struct btrfs_root *root,
+int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
{
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct inode *inode;
int ret = 0;
- root = root->fs_info->tree_root;
-
spin_lock(&block_group->lock);
if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
spin_unlock(&block_group->lock);
@@ -1379,9 +1399,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
path, block_group->key.objectid);
if (ret) {
#ifdef DEBUG
- btrfs_err(root->fs_info,
- "failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ btrfs_err(fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
#endif
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
@@ -1968,11 +1988,11 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_block_group_cache *block_group = ctl->private;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false;
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
- block_group))
+ if (btrfs_should_fragment_free_space(block_group))
forced = true;
#endif
@@ -1988,7 +2008,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* of cache left then go ahead and add them, no sense in adding
* the overhead of a bitmap if we don't have to.
*/
- if (info->bytes <= block_group->sectorsize * 4) {
+ if (info->bytes <= fs_info->sectorsize * 4) {
if (ctl->free_extents * 2 <= ctl->extents_thresh)
return false;
} else {
@@ -2447,6 +2467,7 @@ out:
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct rb_node *n;
@@ -2456,23 +2477,23 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
count++;
- btrfs_crit(block_group->fs_info,
- "entry offset %llu, bytes %llu, bitmap %s",
+ btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
- btrfs_info(block_group->fs_info, "block group has cluster?: %s",
+ btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
- btrfs_info(block_group->fs_info,
+ btrfs_info(fs_info,
"%d blocks of free space at or bigger than bytes is", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock);
- ctl->unit = block_group->sectorsize;
+ ctl->unit = fs_info->sectorsize;
ctl->start = block_group->key.objectid;
ctl->private = block_group;
ctl->op = &free_space_op;
@@ -3011,7 +3032,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
* returns zero and sets up cluster if things worked out, otherwise
* it returns -enospc
*/
-int btrfs_find_space_cluster(struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
@@ -3029,14 +3050,14 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
* For metadata, allow allocates with smaller extents. For
* data, keep it dense.
*/
- if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
+ if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
- min_bytes = block_group->sectorsize;
+ min_bytes = fs_info->sectorsize;
} else {
cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
- min_bytes = block_group->sectorsize;
+ min_bytes = fs_info->sectorsize;
}
spin_lock(&ctl->tree_lock);
@@ -3124,8 +3145,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = btrfs_discard_extent(fs_info->extent_root,
- start, bytes, &trimmed);
+ ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
if (!ret)
*total_trimmed += trimmed;
@@ -3321,6 +3341,7 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree;
struct extent_map *em;
bool cleanup;
@@ -3331,8 +3352,8 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock);
if (cleanup) {
- lock_chunks(block_group->fs_info->chunk_root);
- em_tree = &block_group->fs_info->mapping_tree.map_tree;
+ mutex_lock(&fs_info->chunk_mutex);
+ em_tree = &fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, block_group->key.objectid,
1);
@@ -3343,7 +3364,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
*/
remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
- unlock_chunks(block_group->fs_info->chunk_root);
+ mutex_unlock(&fs_info->chunk_mutex);
/* once for us and once for the tree */
free_extent_map(em);
@@ -3473,7 +3494,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item);
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
/*
@@ -3512,12 +3533,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
int ret;
struct btrfs_io_ctl io_ctl;
bool release_metadata = true;
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
memset(&io_ctl, 0, sizeof(io_ctl));
@@ -3531,16 +3553,16 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
* with or without an error.
*/
release_metadata = false;
- ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+ ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
}
if (ret) {
if (release_metadata)
btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
- btrfs_err(root->fs_info,
- "failed to write free ino cache for root %llu",
- root->root_key.objectid);
+ btrfs_err(fs_info,
+ "failed to write free ino cache for root %llu",
+ root->root_key.objectid);
#endif
}
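The btrfs_wait_cache_io rework above is a thin-wrapper split: the old exported function becomes a private worker keeping the full parameter list, and each caller class gets a wrapper that derives the arguments it can. A reduced sketch of that shape, with the btrfs types collapsed to placeholders:

#include <stddef.h>

struct trans;
struct block_group { int io_ctl; long objectid; };

/* Private worker: keeps every parameter the old exported function had. */
static int wait_cache_io(struct trans *t, struct block_group *bg,
                         int *io_ctl, long offset)
{
        (void)t; (void)bg; (void)io_ctl; (void)offset;
        return 0;       /* flush + wait elided */
}

/* Ino-cache path: no block group, caller supplies its own io_ctl. */
static int wait_cache_io_root(struct trans *t, int *io_ctl)
{
        return wait_cache_io(t, NULL, io_ctl, 0);
}

/* Block-group path: everything is derivable from the block group. */
static int wait_cache_io_bg(struct trans *t, struct block_group *bg)
{
        return wait_cache_io(t, bg, &bg->io_ctl, bg->objectid);
}

int main(void)
{
        struct block_group bg = { 0, 42 };
        int io_ctl = 0;

        return wait_cache_io_bg(NULL, &bg) | wait_cache_io_root(NULL, &io_ctl);
}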
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 363fdd955e5d..6f3c025a2c6c 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -59,7 +59,7 @@ int create_free_space_inode(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
-int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
@@ -67,12 +67,10 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct inode *inode);
int load_free_space_cache(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group);
-int btrfs_wait_cache_io(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
+int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
- struct btrfs_io_ctl *io_ctl,
- struct btrfs_path *path, u64 offset);
-int btrfs_write_out_cache(struct btrfs_root *root,
+ struct btrfs_path *path);
+int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
@@ -111,7 +109,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 57401b474ec6..ff0c55337c2e 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -39,7 +39,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
* We convert to bitmaps when the disk space required for using extents
* exceeds that required for using bitmaps.
*/
- bitmap_range = cache->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
+ bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
bitmap_range);
bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
@@ -189,7 +189,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int ret;
bitmap_size = free_space_bitmap_size(block_group->key.offset,
- block_group->sectorsize);
+ fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
ret = -ENOMEM;
@@ -227,9 +227,9 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ASSERT(found_key.objectid + found_key.offset <= end);
first = div_u64(found_key.objectid - start,
- block_group->sectorsize);
+ fs_info->sectorsize);
last = div_u64(found_key.objectid + found_key.offset - start,
- block_group->sectorsize);
+ fs_info->sectorsize);
le_bitmap_set(bitmap, first, last - first);
extent_count++;
@@ -270,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
}
bitmap_cursor = bitmap;
- bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
+ bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
i = start;
while (i < end) {
unsigned long ptr;
@@ -279,7 +279,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
extent_size = min(end - i, bitmap_range);
data_size = free_space_bitmap_size(extent_size,
- block_group->sectorsize);
+ fs_info->sectorsize);
key.objectid = i;
key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
@@ -330,7 +330,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int ret;
bitmap_size = free_space_bitmap_size(block_group->key.offset,
- block_group->sectorsize);
+ fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
ret = -ENOMEM;
@@ -370,11 +370,11 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
ASSERT(found_key.objectid + found_key.offset <= end);
bitmap_pos = div_u64(found_key.objectid - start,
- block_group->sectorsize *
+ fs_info->sectorsize *
BITS_PER_BYTE);
bitmap_cursor = bitmap + bitmap_pos;
data_size = free_space_bitmap_size(found_key.offset,
- block_group->sectorsize);
+ fs_info->sectorsize);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
read_extent_buffer(leaf, bitmap_cursor, ptr,
@@ -425,7 +425,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
extent_count++;
}
prev_bit = bit;
- offset += block_group->sectorsize;
+ offset += fs_info->sectorsize;
bitnr++;
}
if (prev_bit == 1) {
@@ -517,7 +517,8 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group,
ASSERT(offset >= found_start && offset < found_end);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- i = div_u64(offset - found_start, block_group->sectorsize);
+ i = div_u64(offset - found_start,
+ block_group->fs_info->sectorsize);
return !!extent_buffer_test_bit(leaf, ptr, i);
}
@@ -525,6 +526,7 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_buffer *leaf;
struct btrfs_key key;
u64 end = *start + *size;
@@ -544,8 +546,8 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
end = found_end;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- first = div_u64(*start - found_start, block_group->sectorsize);
- last = div_u64(end - found_start, block_group->sectorsize);
+ first = div_u64(*start - found_start, fs_info->sectorsize);
+ last = div_u64(end - found_start, fs_info->sectorsize);
if (bit)
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
@@ -606,7 +608,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
* that block is within the block group.
*/
if (start > block_group->key.objectid) {
- u64 prev_block = start - block_group->sectorsize;
+ u64 prev_block = start - block_group->fs_info->sectorsize;
key.objectid = prev_block;
key.type = (u8)-1;
@@ -1121,7 +1123,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
}
start = key.objectid;
if (key.type == BTRFS_METADATA_ITEM_KEY)
- start += fs_info->tree_root->nodesize;
+ start += fs_info->nodesize;
else
start += key.offset;
} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
@@ -1187,7 +1189,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
- ret = btrfs_commit_transaction(trans, tree_root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@@ -1196,7 +1198,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
abort:
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, tree_root);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -1267,7 +1269,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
list_del(&free_space_root->dirty_list);
btrfs_tree_lock(free_space_root->node);
- clean_tree_block(trans, tree_root->fs_info, free_space_root->node);
+ clean_tree_block(trans, fs_info, free_space_root->node);
btrfs_tree_unlock(free_space_root->node);
btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
0, 1);
@@ -1276,7 +1278,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
free_extent_buffer(free_space_root->commit_root);
kfree(free_space_root);
- ret = btrfs_commit_transaction(trans, tree_root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@@ -1284,7 +1286,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
abort:
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, tree_root);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -1473,7 +1475,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
extent_count++;
}
prev_bit = bit;
- offset += block_group->sectorsize;
+ offset += fs_info->sectorsize;
}
}
if (prev_bit == 1) {
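free_space_set_bits and free_space_test_bit above map byte offsets inside a bitmap item to bit indices by dividing by the sector size, which is why the sectorsize move from block_group to fs_info touches so many div_u64 calls. A standalone sketch of the index math (values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t sectorsize = 4096;             /* fs_info->sectorsize */
        uint64_t found_start = 1 << 20;         /* start of the bitmap item */
        uint64_t start = found_start + 8192;    /* range to mark */
        uint64_t end = start + 16384;

        /* Same div_u64-based math as free_space_set_bits above. */
        uint64_t first = (start - found_start) / sectorsize;
        uint64_t last = (end - found_start) / sectorsize;

        printf("set bits [%llu, %llu)\n",
               (unsigned long long)first, (unsigned long long)last);
        return 0;
}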
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index b8acc07ac6c2..39c968f80157 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -182,7 +182,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
- btrfs_truncate_item(root, path, item_size - del_len, 1);
+ btrfs_truncate_item(root->fs_info, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
@@ -245,7 +245,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
+ btrfs_truncate_item(root->fs_info, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
@@ -297,7 +297,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
name, name_len, NULL))
goto out;
- btrfs_extend_item(root, path, ins_len);
+ btrfs_extend_item(root->fs_info, path, ins_len);
ret = 0;
}
if (ret < 0)
@@ -328,6 +328,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 index)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_inode_ref *ref;
@@ -354,7 +355,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- btrfs_extend_item(root, path, ins_len);
+ btrfs_extend_item(fs_info, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
@@ -384,7 +385,7 @@ out:
btrfs_free_path(path);
if (ret == -EMLINK) {
- struct btrfs_super_block *disk_super = root->fs_info->super_copy;
+ struct btrfs_super_block *disk_super = fs_info->super_copy;
/* We ran out of space in the ref array. Need to
* add an extended ref. */
if (btrfs_super_incompat_flags(disk_super)
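The -EMLINK tail of btrfs_insert_inode_ref above retries with an extended ref only when the superblock advertises the EXTENDED_IREF incompat bit. A sketch of that decision; the flag bit and the errno plumbing here are simplifications, not the kernel definitions:

#include <stdio.h>

#define EMLINK_ERR             (-31)            /* -EMLINK on Linux */
#define INCOMPAT_EXTENDED_IREF (1u << 6)        /* illustrative bit value */

static int insert_ref(unsigned super_flags, int first_try_ret)
{
        if (first_try_ret == EMLINK_ERR) {
                /* Ref array full: fall back to an extended ref if the
                 * filesystem supports them. */
                if (super_flags & INCOMPAT_EXTENDED_IREF)
                        return 0;       /* btrfs_insert_inode_extref() elided */
        }
        return first_try_ret;
}

int main(void)
{
        printf("%d\n", insert_ref(INCOMPAT_EXTENDED_IREF, EMLINK_ERR)); /* 0 */
        printf("%d\n", insert_ref(0, EMLINK_ERR));                      /* -31 */
        return 0;
}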
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index d27014b8bf72..144b119ff43f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,7 +38,7 @@ static int caching_kthread(void *data)
int slot;
int ret;
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -180,7 +180,7 @@ static void start_caching(struct btrfs_root *root)
if (IS_ERR(tsk)) {
btrfs_warn(fs_info, "failed to start inode caching task");
btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
- "disabling inode map caching");
+ "disabling inode map caching");
}
}
@@ -395,6 +395,7 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
int btrfs_save_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct btrfs_path *path;
struct inode *inode;
@@ -415,7 +416,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
if (btrfs_root_refs(&root->root_item) == 0)
return 0;
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -423,7 +424,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
return -ENOMEM;
rsv = trans->block_rsv;
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
num_bytes = trans->bytes_reserved;
/*
@@ -433,14 +434,14 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
* 1 item for free space object
* 3 items for pre-allocation
*/
- trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
+ trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
trans->bytes_reserved,
BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
- trace_btrfs_space_reservation(root->fs_info, "ino_cache",
- trans->transid, trans->bytes_reserved, 1);
+ trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
+ trans->bytes_reserved, 1);
again:
inode = lookup_free_ino_inode(root, path);
if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
@@ -506,9 +507,10 @@ again:
out_put:
iput(inode);
out_release:
- trace_btrfs_space_reservation(root->fs_info, "ino_cache",
- trans->transid, trans->bytes_reserved, 0);
- btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+ trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
+ trans->bytes_reserved, 0);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv,
+ trans->bytes_reserved);
out:
trans->block_rsv = rsv;
trans->bytes_reserved = num_bytes;
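btrfs_save_ino_cache() above follows a strict save/swap/restore
discipline around its temporary reservation: stash the caller's rsv,
borrow the transaction rsv for a worst-case item count, and restore
both fields on every exit path. A compilable sketch of that discipline
with stand-in types (not the btrfs APIs):

#include <stdio.h>

struct block_rsv { const char *name; };
struct trans {
	struct block_rsv *block_rsv;
	unsigned long bytes_reserved;
};

static void save_cache(struct trans *trans, struct block_rsv *trans_rsv,
		       unsigned long worst_case)
{
	struct block_rsv *rsv = trans->block_rsv;	/* stash caller's rsv */
	unsigned long num_bytes = trans->bytes_reserved;

	trans->block_rsv = trans_rsv;
	trans->bytes_reserved = worst_case;
	printf("charging %lu bytes to the %s rsv\n",
	       trans->bytes_reserved, trans->block_rsv->name);
	/* ... work that consumes the reservation would run here ... */

	trans->block_rsv = rsv;				/* restore both fields */
	trans->bytes_reserved = num_bytes;
}

int main(void)
{
	struct block_rsv mine = { "caller" }, shared = { "trans" };
	struct trans t = { &mine, 123 };

	save_cache(&t, &shared, 10 * 4096);
	printf("restored %s / %lu\n", t.block_rsv->name, t.bytes_reserved);
	return 0;
}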
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a4c879671b9d..c3b6ffa8e39d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -30,7 +30,6 @@
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
@@ -250,11 +249,12 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
int compress_type,
struct page **compressed_pages)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
- u64 aligned_end = ALIGN(end, root->sectorsize);
+ u64 aligned_end = ALIGN(end, fs_info->sectorsize);
u64 data_len = inline_len;
int ret;
struct btrfs_path *path;
@@ -265,12 +265,12 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
data_len = compressed_size;
if (start > 0 ||
- actual_end > root->sectorsize ||
- data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
+ actual_end > fs_info->sectorsize ||
+ data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
(!compressed_size &&
- (actual_end & (root->sectorsize - 1)) == 0) ||
+ (actual_end & (fs_info->sectorsize - 1)) == 0) ||
end + 1 < isize ||
- data_len > root->fs_info->max_inline) {
+ data_len > fs_info->max_inline) {
return 1;
}
@@ -283,7 +283,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
btrfs_free_path(path);
return PTR_ERR(trans);
}
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+ trans->block_rsv = &fs_info->delalloc_block_rsv;
if (compressed_size && compressed_pages)
extent_item_size = btrfs_file_extent_calc_inline_size(
@@ -326,7 +326,7 @@ out:
*/
btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
btrfs_free_path(path);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -373,15 +373,15 @@ static noinline int add_async_extent(struct async_cow *cow,
static inline int inode_need_compress(struct inode *inode)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
/* force compress */
- if (btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
+ if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
/* bad compression ratios */
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
- if (btrfs_test_opt(root->fs_info, COMPRESS) ||
+ if (btrfs_test_opt(fs_info, COMPRESS) ||
BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
BTRFS_I(inode)->force_compress)
return 1;
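The decision order in inode_need_compress() above is: the FORCE_COMPRESS
mount option wins outright, the per-inode NOCOMPRESS flag then vetoes,
and otherwise any opt-in source (mount option, inode flag, or defrag
request) enables compression. Restated as a standalone sketch with
invented flag names:

#include <stdbool.h>
#include <stdio.h>

struct opts  { bool force_compress, compress; };
struct vnode { bool nocompress, inode_compress, force_compress; };

static bool need_compress(const struct opts *o, const struct vnode *v)
{
	if (o->force_compress)		/* forced at mount: always */
		return true;
	if (v->nocompress)		/* per-inode veto */
		return false;
	return o->compress || v->inode_compress || v->force_compress;
}

int main(void)
{
	struct opts o = { .compress = true };
	struct vnode v = { .nocompress = true };

	printf("%d\n", need_compress(&o, &v));	/* 0: the veto wins */
	return 0;
}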
@@ -411,9 +411,10 @@ static noinline void compress_file_range(struct inode *inode,
struct async_cow *async_cow,
int *num_added)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 num_bytes;
- u64 blocksize = root->sectorsize;
+ u64 blocksize = fs_info->sectorsize;
u64 actual_end;
u64 isize = i_size_read(inode);
int ret = 0;
@@ -426,7 +427,7 @@ static noinline void compress_file_range(struct inode *inode,
unsigned long max_uncompressed = SZ_128K;
int i;
int will_compress;
- int compress_type = root->fs_info->compress_type;
+ int compress_type = fs_info->compress_type;
int redirty = 0;
/* if this is a small write inside eof, kick off a defrag */
@@ -625,7 +626,7 @@ cont:
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
- if (!btrfs_test_opt(root->fs_info, FORCE_COMPRESS) &&
+ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
@@ -683,6 +684,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
static noinline void submit_compressed_extents(struct inode *inode,
struct async_cow *async_cow)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct async_extent *async_extent;
u64 alloc_hint = 0;
struct btrfs_key ins;
@@ -795,7 +797,7 @@ retry:
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = async_extent->ram_size;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
em->compress_type = async_extent->compress_type;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -830,7 +832,7 @@ retry:
async_extent->ram_size - 1, 0);
goto out_free_reserve;
}
- btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
/*
* clear dirty, set writeback and unlock the pages.
@@ -871,8 +873,8 @@ retry:
}
return;
out_free_reserve:
- btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
@@ -940,13 +942,14 @@ static noinline int cow_file_range(struct inode *inode,
int *page_started, unsigned long *nr_written,
int unlock, struct btrfs_dedupe_hash *hash)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 alloc_hint = 0;
u64 num_bytes;
unsigned long ram_size;
u64 disk_num_bytes;
u64 cur_alloc_size;
- u64 blocksize = root->sectorsize;
+ u64 blocksize = fs_info->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -990,7 +993,7 @@ static noinline int cow_file_range(struct inode *inode,
}
BUG_ON(disk_num_bytes >
- btrfs_super_total_bytes(root->fs_info->super_copy));
+ btrfs_super_total_bytes(fs_info->super_copy));
alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -1000,7 +1003,7 @@ static noinline int cow_file_range(struct inode *inode,
cur_alloc_size = disk_num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
- root->sectorsize, 0, alloc_hint,
+ fs_info->sectorsize, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
goto out_unlock;
@@ -1021,7 +1024,7 @@ static noinline int cow_file_range(struct inode *inode,
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ram_size;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
em->generation = -1;
@@ -1053,7 +1056,7 @@ static noinline int cow_file_range(struct inode *inode,
goto out_drop_extent_cache;
}
- btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (disk_num_bytes < cur_alloc_size)
break;
@@ -1084,8 +1087,8 @@ out:
out_drop_extent_cache:
btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
- btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
locked_page,
@@ -1119,6 +1122,7 @@ static noinline void async_cow_start(struct btrfs_work *work)
*/
static noinline void async_cow_submit(struct btrfs_work *work)
{
+ struct btrfs_fs_info *fs_info;
struct async_cow *async_cow;
struct btrfs_root *root;
unsigned long nr_pages;
@@ -1126,16 +1130,17 @@ static noinline void async_cow_submit(struct btrfs_work *work)
async_cow = container_of(work, struct async_cow, work);
root = async_cow->root;
+ fs_info = root->fs_info;
nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
PAGE_SHIFT;
/*
* atomic_sub_return implies a barrier for waitqueue_active
*/
- if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
+ if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
5 * SZ_1M &&
- waitqueue_active(&root->fs_info->async_submit_wait))
- wake_up(&root->fs_info->async_submit_wait);
+ waitqueue_active(&fs_info->async_submit_wait))
+ wake_up(&fs_info->async_submit_wait);
if (async_cow->inode)
submit_compressed_extents(async_cow->inode, async_cow);
@@ -1154,6 +1159,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct async_cow *async_cow;
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
@@ -1171,7 +1177,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow->start = start;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
- !btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
+ !btrfs_test_opt(fs_info, FORCE_COMPRESS))
cur_end = end;
else
cur_end = min(end, start + SZ_512K - 1);
@@ -1186,22 +1192,21 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
nr_pages = (cur_end - start + PAGE_SIZE) >>
PAGE_SHIFT;
- atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
+ atomic_add(nr_pages, &fs_info->async_delalloc_pages);
- btrfs_queue_work(root->fs_info->delalloc_workers,
- &async_cow->work);
+ btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
- if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
- wait_event(root->fs_info->async_submit_wait,
- (atomic_read(&root->fs_info->async_delalloc_pages) <
- limit));
+ if (atomic_read(&fs_info->async_delalloc_pages) > limit) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->async_delalloc_pages) <
+ limit));
}
- while (atomic_read(&root->fs_info->async_submit_draining) &&
- atomic_read(&root->fs_info->async_delalloc_pages)) {
- wait_event(root->fs_info->async_submit_wait,
- (atomic_read(&root->fs_info->async_delalloc_pages) ==
- 0));
+ while (atomic_read(&fs_info->async_submit_draining) &&
+ atomic_read(&fs_info->async_delalloc_pages)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->async_delalloc_pages) ==
+ 0));
}
*nr_written += nr_pages;
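The async_delalloc_pages counter above implements a simple throttle:
writers add the page count before queueing work, the submit worker
subtracts it and wakes sleepers once the count drops under the limit,
and cow_file_range_async() sleeps while the count is over the limit.
The barrier note matters because the kernel checks waitqueue_active()
locklessly; the userspace sketch below sidesteps that subtlety by
holding a mutex on both sides (invented names, pthreads standing in
for the waitqueue):

#include <pthread.h>
#include <stdio.h>

#define LIMIT 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static long async_pages;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 8; i++) {		/* async_cow_submit() side */
		pthread_mutex_lock(&lock);
		if (--async_pages < LIMIT)
			pthread_cond_broadcast(&wq);	/* wake_up() */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	async_pages = 8;			/* atomic_add() before queueing */
	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);		/* cow_file_range_async() side */
	while (async_pages >= LIMIT)		/* wait_event() */
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("throttle released, %ld pages left\n", async_pages);
	return 0;
}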
@@ -1211,14 +1216,14 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
return 0;
}
-static noinline int csum_exist_in_range(struct btrfs_root *root,
+static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
int ret;
struct btrfs_ordered_sum *sums;
LIST_HEAD(list);
- ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
+ ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
bytenr + num_bytes - 1, &list, 0);
if (ret == 0 && list_empty(&list))
return 0;
@@ -1243,6 +1248,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 start, u64 end, int *page_started, int force,
unsigned long *nr_written)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct extent_buffer *leaf;
@@ -1298,7 +1304,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
return PTR_ERR(trans);
}
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+ trans->block_rsv = &fs_info->delalloc_block_rsv;
cow_start = (u64)-1;
cur_offset = start;
@@ -1374,7 +1380,7 @@ next_slot:
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
- if (btrfs_extent_readonly(root, disk_bytenr))
+ if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
@@ -1397,17 +1403,18 @@ next_slot:
 * this ensures that the csums for a given extent
 * are either all valid or all absent.
*/
- if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+ if (csum_exist_in_range(fs_info, disk_bytenr,
+ num_bytes))
goto out_check;
- if (!btrfs_inc_nocow_writers(root->fs_info,
- disk_bytenr))
+ if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
goto out_check;
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
btrfs_file_extent_inline_len(leaf,
path->slots[0], fi);
- extent_end = ALIGN(extent_end, root->sectorsize);
+ extent_end = ALIGN(extent_end,
+ fs_info->sectorsize);
} else {
BUG_ON(1);
}
@@ -1417,8 +1424,7 @@ out_check:
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
if (nocow)
- btrfs_dec_nocow_writers(root->fs_info,
- disk_bytenr);
+ btrfs_dec_nocow_writers(fs_info, disk_bytenr);
goto next_slot;
}
if (!nocow) {
@@ -1441,7 +1447,7 @@ out_check:
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
if (nocow)
- btrfs_dec_nocow_writers(root->fs_info,
+ btrfs_dec_nocow_writers(fs_info,
disk_bytenr);
goto error;
}
@@ -1461,7 +1467,7 @@ out_check:
em->block_start = disk_bytenr;
em->orig_block_len = disk_num_bytes;
em->ram_bytes = ram_bytes;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
em->mod_start = em->start;
em->mod_len = em->len;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
@@ -1486,7 +1492,7 @@ out_check:
ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
num_bytes, num_bytes, type);
if (nocow)
- btrfs_dec_nocow_writers(root->fs_info, disk_bytenr);
+ btrfs_dec_nocow_writers(fs_info, disk_bytenr);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
@@ -1528,7 +1534,7 @@ out_check:
}
error:
- err = btrfs_end_transaction(trans, root);
+ err = btrfs_end_transaction(trans);
if (!ret)
ret = err;
@@ -1693,6 +1699,8 @@ static void btrfs_merge_extent_hook(struct inode *inode,
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
spin_lock(&root->delalloc_lock);
if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
@@ -1701,11 +1709,11 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
&BTRFS_I(inode)->runtime_flags);
root->nr_delalloc_inodes++;
if (root->nr_delalloc_inodes == 1) {
- spin_lock(&root->fs_info->delalloc_root_lock);
+ spin_lock(&fs_info->delalloc_root_lock);
BUG_ON(!list_empty(&root->delalloc_root));
list_add_tail(&root->delalloc_root,
- &root->fs_info->delalloc_roots);
- spin_unlock(&root->fs_info->delalloc_root_lock);
+ &fs_info->delalloc_roots);
+ spin_unlock(&fs_info->delalloc_root_lock);
}
}
spin_unlock(&root->delalloc_lock);
@@ -1714,6 +1722,8 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
spin_lock(&root->delalloc_lock);
if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
@@ -1721,10 +1731,10 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
&BTRFS_I(inode)->runtime_flags);
root->nr_delalloc_inodes--;
if (!root->nr_delalloc_inodes) {
- spin_lock(&root->fs_info->delalloc_root_lock);
+ spin_lock(&fs_info->delalloc_root_lock);
BUG_ON(list_empty(&root->delalloc_root));
list_del_init(&root->delalloc_root);
- spin_unlock(&root->fs_info->delalloc_root_lock);
+ spin_unlock(&fs_info->delalloc_root_lock);
}
}
spin_unlock(&root->delalloc_lock);
@@ -1739,6 +1749,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
struct extent_state *state, unsigned *bits)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
WARN_ON(1);
/*
@@ -1760,11 +1772,11 @@ static void btrfs_set_bit_hook(struct inode *inode,
}
/* For sanity tests */
- if (btrfs_is_testing(root->fs_info))
+ if (btrfs_is_testing(fs_info))
return;
- __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
- root->fs_info->delalloc_batch);
+ __percpu_counter_add(&fs_info->delalloc_bytes, len,
+ fs_info->delalloc_batch);
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
if (*bits & EXTENT_DEFRAG)
@@ -1783,6 +1795,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
struct extent_state *state,
unsigned *bits)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 len = state->end + 1 - state->start;
u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
BTRFS_MAX_EXTENT_SIZE);
@@ -1815,11 +1828,11 @@ static void btrfs_clear_bit_hook(struct inode *inode,
* error.
*/
if (*bits & EXTENT_DO_ACCOUNTING &&
- root != root->fs_info->tree_root)
+ root != fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, len);
/* For sanity tests. */
- if (btrfs_is_testing(root->fs_info))
+ if (btrfs_is_testing(fs_info))
return;
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
@@ -1829,8 +1842,8 @@ static void btrfs_clear_bit_hook(struct inode *inode,
btrfs_free_reserved_data_space_noquota(inode,
state->start, len);
- __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
- root->fs_info->delalloc_batch);
+ __percpu_counter_add(&fs_info->delalloc_bytes, -len,
+ fs_info->delalloc_batch);
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes -= len;
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
@@ -1853,7 +1866,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
- struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+ struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 logical = (u64)bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
@@ -1864,8 +1878,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
length = bio->bi_iter.bi_size;
map_length = length;
- ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
- &map_length, NULL, 0);
+ ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
+ NULL, 0);
if (ret < 0)
return ret;
if (map_length < length + size)
@@ -1885,10 +1899,9 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
- ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+ ret = btrfs_csum_one_bio(inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
@@ -1905,10 +1918,10 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
- ret = btrfs_map_bio(root, bio, mirror_num, 1);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -1924,6 +1937,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
int ret = 0;
@@ -1936,7 +1950,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
if (bio_op(bio) != REQ_OP_WRITE) {
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+ ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
if (ret)
goto out;
@@ -1946,7 +1960,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
bio_flags);
goto out;
} else if (!skip_sum) {
- ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
+ ret = btrfs_lookup_bio_sums(inode, bio, NULL);
if (ret)
goto out;
}
@@ -1956,20 +1970,19 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
/* we're doing a write, do the async checksumming */
- ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, bio, mirror_num,
- bio_flags, bio_offset,
- __btrfs_submit_bio_start,
- __btrfs_submit_bio_done);
+ ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num,
+ bio_flags, bio_offset,
+ __btrfs_submit_bio_start,
+ __btrfs_submit_bio_done);
goto out;
} else if (!skip_sum) {
- ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+ ret = btrfs_csum_one_bio(inode, bio, 0, 0);
if (ret)
goto out;
}
mapit:
- ret = btrfs_map_bio(root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
out:
if (ret < 0) {
@@ -2090,8 +2103,8 @@ out_page:
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_writepage_fixup *fixup;
- struct btrfs_root *root = BTRFS_I(inode)->root;
/* this page is properly in the ordered list */
if (TestClearPagePrivate2(page))
@@ -2109,7 +2122,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
btrfs_init_work(&fixup->work, btrfs_fixup_helper,
btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page;
- btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
+ btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
return -EBUSY;
}
@@ -2180,10 +2193,9 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
- ret = btrfs_alloc_reserved_file_extent(trans, root,
- root->root_key.objectid,
- btrfs_ino(inode), file_pos,
- ram_bytes, &ins);
+ ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
+ btrfs_ino(inode), file_pos,
+ ram_bytes, &ins);
/*
* Release the reserved range from inode dirty range map, as it is
* already moved into delayed_ref_head
@@ -2293,7 +2305,6 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
void *ctx)
{
struct btrfs_file_extent_item *extent;
- struct btrfs_fs_info *fs_info;
struct old_sa_defrag_extent *old = ctx;
struct new_sa_defrag_extent *new = old->new;
struct btrfs_path *path = new->path;
@@ -2302,6 +2313,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
struct sa_defrag_extent_backref *backref;
struct extent_buffer *leaf;
struct inode *inode = new->inode;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int slot;
int ret;
u64 extent_offset;
@@ -2315,7 +2327,6 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
- fs_info = BTRFS_I(inode)->root->fs_info;
root = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(root)) {
if (PTR_ERR(root) == -ENOENT)
@@ -2413,7 +2424,7 @@ out:
static noinline bool record_extent_backrefs(struct btrfs_path *path,
struct new_sa_defrag_extent *new)
{
- struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
+ struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
struct old_sa_defrag_extent *old, *tmp;
int ret;
@@ -2471,13 +2482,12 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
struct btrfs_file_extent_item *item;
struct btrfs_ordered_extent *ordered;
struct btrfs_trans_handle *trans;
- struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
struct extent_buffer *leaf;
struct old_sa_defrag_extent *old = backref->old;
struct new_sa_defrag_extent *new = old->new;
- struct inode *src_inode = new->inode;
+ struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
struct inode *inode;
struct extent_state *cached = NULL;
int ret = 0;
@@ -2498,7 +2508,6 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
- fs_info = BTRFS_I(src_inode)->root->fs_info;
index = srcu_read_lock(&fs_info->subvol_srcu);
root = btrfs_read_fs_root_no_name(fs_info, &key);
@@ -2643,7 +2652,7 @@ again:
inode_add_bytes(inode, len);
btrfs_release_path(path);
- ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+ ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
new->disk_len, 0,
backref->root_id, backref->inum,
new->file_pos); /* start - extent_offset */
@@ -2656,7 +2665,7 @@ again:
out_free_path:
btrfs_release_path(path);
path->leave_spinning = 0;
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
&cached, GFP_NOFS);
@@ -2679,6 +2688,7 @@ static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
static void relink_file_extents(struct new_sa_defrag_extent *new)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
struct btrfs_path *path;
struct sa_defrag_extent_backref *backref;
struct sa_defrag_extent_backref *prev = NULL;
@@ -2725,14 +2735,15 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
out:
free_sa_defrag_extent(new);
- atomic_dec(&root->fs_info->defrag_running);
- wake_up(&root->fs_info->transaction_wait);
+ atomic_dec(&fs_info->defrag_running);
+ wake_up(&fs_info->transaction_wait);
}
static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
struct btrfs_ordered_extent *ordered)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -2831,7 +2842,7 @@ next:
}
btrfs_free_path(path);
- atomic_inc(&root->fs_info->defrag_running);
+ atomic_inc(&fs_info->defrag_running);
return new;
@@ -2842,12 +2853,12 @@ out_kfree:
return NULL;
}
-static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
+static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
struct btrfs_block_group_cache *cache;
- cache = btrfs_lookup_block_group(root->fs_info, start);
+ cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);
spin_lock(&cache->lock);
@@ -2864,6 +2875,7 @@ static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
struct inode *inode = ordered_extent->inode;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -2914,7 +2926,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans = NULL;
goto out;
}
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+ trans->block_rsv = &fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
@@ -2949,7 +2961,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out_unlock;
}
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+ trans->block_rsv = &fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
@@ -2960,7 +2972,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->file_offset +
logical_len);
} else {
- BUG_ON(root == root->fs_info->tree_root);
+ BUG_ON(root == fs_info->tree_root);
ret = insert_reserved_file_extent(trans, inode,
ordered_extent->file_offset,
ordered_extent->start,
@@ -2969,7 +2981,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
if (!ret)
- btrfs_release_delalloc_bytes(root,
+ btrfs_release_delalloc_bytes(fs_info,
ordered_extent->start,
ordered_extent->disk_len);
}
@@ -2996,10 +3008,10 @@ out_unlock:
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
- if (root != root->fs_info->tree_root)
+ if (root != fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
if (trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret || truncated) {
u64 start, end;
@@ -3023,7 +3035,8 @@ out:
if ((ret || !logical_len) &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
- btrfs_free_reserved_extent(root, ordered_extent->start,
+ btrfs_free_reserved_extent(fs_info,
+ ordered_extent->start,
ordered_extent->disk_len, 1);
}
@@ -3038,7 +3051,7 @@ out:
if (new) {
if (ret) {
free_sa_defrag_extent(new);
- atomic_dec(&root->fs_info->defrag_running);
+ atomic_dec(&fs_info->defrag_running);
} else {
relink_file_extents(new);
}
@@ -3063,7 +3076,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
struct inode *inode = page->mapping->host;
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered_extent = NULL;
struct btrfs_workqueue *wq;
btrfs_work_func_t func;
@@ -3076,10 +3089,10 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
return 0;
if (btrfs_is_free_space_inode(inode)) {
- wq = root->fs_info->endio_freespace_worker;
+ wq = fs_info->endio_freespace_worker;
func = btrfs_freespace_write_helper;
} else {
- wq = root->fs_info->endio_write_workers;
+ wq = fs_info->endio_write_workers;
func = btrfs_endio_write_helper;
}
@@ -3103,7 +3116,7 @@ static int __readpage_endio_check(struct inode *inode,
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(kaddr + pgoff, csum, len);
- btrfs_csum_final(csum, (char *)&csum);
+ btrfs_csum_final(csum, (u8 *)&csum);
if (csum != csum_expected)
goto zeroit;
@@ -3156,7 +3169,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
void btrfs_add_delayed_iput(struct inode *inode)
{
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_inode *binode = BTRFS_I(inode);
if (atomic_add_unless(&inode->i_count, -1, 1))
@@ -3172,9 +3185,8 @@ void btrfs_add_delayed_iput(struct inode *inode)
spin_unlock(&fs_info->delayed_iput_lock);
}
-void btrfs_run_delayed_iputs(struct btrfs_root *root)
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->delayed_iput_lock);
while (!list_empty(&fs_info->delayed_iputs)) {
@@ -3204,6 +3216,7 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *block_rsv;
int ret;
@@ -3228,7 +3241,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
btrfs_root_refs(&root->root_item) > 0) {
- ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
+ ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
root->root_key.objectid);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -3239,7 +3252,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
if (block_rsv) {
WARN_ON(block_rsv->size > 0);
- btrfs_free_block_rsv(root, block_rsv);
+ btrfs_free_block_rsv(fs_info, block_rsv);
}
}
@@ -3252,6 +3265,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
*/
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = NULL;
int reserve = 0;
@@ -3259,7 +3273,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
int ret;
if (!root->orphan_block_rsv) {
- block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ block_rsv = btrfs_alloc_block_rsv(fs_info,
+ BTRFS_BLOCK_RSV_TEMP);
if (!block_rsv)
return -ENOMEM;
}
@@ -3268,7 +3283,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (!root->orphan_block_rsv) {
root->orphan_block_rsv = block_rsv;
} else if (block_rsv) {
- btrfs_free_block_rsv(root, block_rsv);
+ btrfs_free_block_rsv(fs_info, block_rsv);
block_rsv = NULL;
}
@@ -3331,7 +3346,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* insert an orphan item to track subvolume contains orphan files */
if (insert >= 2) {
- ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
+ ret = btrfs_insert_orphan_item(trans, fs_info->tree_root,
root->root_key.objectid);
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, ret);
@@ -3382,6 +3397,7 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
*/
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key, found_key;
@@ -3441,8 +3457,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
*/
if (found_key.offset == last_objectid) {
- btrfs_err(root->fs_info,
- "Error removing orphan entry, stopping orphan cleanup");
+ btrfs_err(fs_info,
+ "Error removing orphan entry, stopping orphan cleanup");
ret = -EINVAL;
goto out;
}
@@ -3452,12 +3468,12 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
- inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ENOENT)
goto out;
- if (ret == -ENOENT && root == root->fs_info->tree_root) {
+ if (ret == -ENOENT && root == fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;
@@ -3499,11 +3515,11 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
ret = PTR_ERR(trans);
goto out;
}
- btrfs_debug(root->fs_info, "auto deleting %Lu",
- found_key.objectid);
+ btrfs_debug(fs_info, "auto deleting %Lu",
+ found_key.objectid);
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret)
goto out;
continue;
@@ -3533,7 +3549,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
goto out;
}
ret = btrfs_orphan_add(trans, inode);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret) {
iput(inode);
goto out;
@@ -3557,25 +3573,24 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
- btrfs_block_rsv_release(root, root->orphan_block_rsv,
+ btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
(u64)-1);
if (root->orphan_block_rsv ||
test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
}
if (nr_unlink)
- btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
+ btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
if (nr_truncate)
- btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
+ btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);
out:
if (ret)
- btrfs_err(root->fs_info,
- "could not do orphan cleanup %d", ret);
+ btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
btrfs_free_path(path);
return ret;
}
@@ -3654,6 +3669,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
*/
static int btrfs_read_locked_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
@@ -3734,7 +3750,7 @@ cache_index:
* This is required for both inode re-read from disk and delayed inode
* in delayed_nodes_tree.
*/
- if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
+ if (BTRFS_I(inode)->last_trans == fs_info->generation)
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
@@ -3800,7 +3816,7 @@ cache_acl:
path->slots[0] = first_xattr_slot;
ret = btrfs_load_inode_props(inode, path);
if (ret)
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"error loading props for ino %llu (root %llu): %d",
btrfs_ino(inode),
root->root_key.objectid, ret);
@@ -3819,7 +3835,7 @@ cache_acl:
break;
case S_IFDIR:
inode->i_fop = &btrfs_dir_file_operations;
- if (root == root->fs_info->tree_root)
+ if (root == fs_info->tree_root)
inode->i_op = &btrfs_dir_ro_inode_operations;
else
inode->i_op = &btrfs_dir_inode_operations;
@@ -3937,6 +3953,7 @@ failed:
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
/*
@@ -3948,7 +3965,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
*/
if (!btrfs_is_free_space_inode(inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
- && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
+ && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
@@ -3982,6 +3999,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct inode *dir, struct inode *inode,
const char *name, int name_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
int ret = 0;
struct extent_buffer *leaf;
@@ -4036,14 +4054,14 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
dir_ino, &index);
if (ret) {
- btrfs_info(root->fs_info,
+ btrfs_info(fs_info,
"failed to delete reference to %.*s, inode %llu parent %llu",
name_len, name, ino, dir_ino);
btrfs_abort_transaction(trans, ret);
goto err;
}
skip_backref:
- ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+ ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto err;
@@ -4138,8 +4156,8 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
}
out:
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(root->fs_info);
return ret;
}
@@ -4148,6 +4166,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct inode *dir, u64 objectid,
const char *name, int name_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
@@ -4180,9 +4199,9 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
- objectid, root->root_key.objectid,
- dir_ino, &index, name, name_len);
+ ret = btrfs_del_root_ref(trans, fs_info, objectid,
+ root->root_key.objectid, dir_ino,
+ &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
@@ -4206,7 +4225,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+ ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -4274,8 +4293,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
}
out:
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(root->fs_info);
return err;
}
@@ -4284,18 +4303,19 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytes_deleted)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
/*
* This is only used to apply pressure to the enospc system, we don't
* intend to use this reservation at all.
*/
- bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
- bytes_deleted *= root->nodesize;
- ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
+ bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
+ bytes_deleted *= fs_info->nodesize;
+ ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
if (!ret) {
- trace_btrfs_space_reservation(root->fs_info, "transaction",
+ trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid,
bytes_deleted, 1);
trans->bytes_reserved += bytes_deleted;
@@ -4338,7 +4358,7 @@ static int truncate_inline_extent(struct inode *inode,
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(root, path, size, 1);
+ btrfs_truncate_item(root->fs_info, path, size, 1);
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
inode_sub_bytes(inode, item_end + 1 - new_size);
@@ -4362,6 +4382,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct inode *inode,
u64 new_size, u32 min_type)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
@@ -4407,9 +4428,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* extent just the way it is.
*/
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == root->fs_info->tree_root)
+ root == fs_info->tree_root)
btrfs_drop_extent_cache(inode, ALIGN(new_size,
- root->sectorsize), (u64)-1, 0);
+ fs_info->sectorsize),
+ (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
@@ -4431,7 +4453,7 @@ search_again:
* bytes_deleted is > 0, it will be huge by the time we get here
*/
if (be_nice && bytes_deleted > SZ_32M) {
- if (btrfs_should_end_transaction(trans, root)) {
+ if (btrfs_should_end_transaction(trans)) {
err = -EAGAIN;
goto error;
}
@@ -4508,7 +4530,7 @@ search_again:
btrfs_file_extent_num_bytes(leaf, fi);
extent_num_bytes = ALIGN(new_size -
found_key.offset,
- root->sectorsize);
+ fs_info->sectorsize);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
@@ -4595,16 +4617,16 @@ delete:
if (found_extent &&
(test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == root->fs_info->tree_root)) {
+ root == fs_info->tree_root)) {
btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
- ret = btrfs_free_extent(trans, root, extent_start,
+ ret = btrfs_free_extent(trans, fs_info, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset);
BUG_ON(ret);
- if (btrfs_should_throttle_delayed_refs(trans, root))
- btrfs_async_run_delayed_refs(root,
+ if (btrfs_should_throttle_delayed_refs(trans, fs_info))
+ btrfs_async_run_delayed_refs(fs_info,
trans->delayed_ref_updates * 2,
trans->transid, 0);
if (be_nice) {
@@ -4613,9 +4635,8 @@ delete:
should_end = 1;
}
if (btrfs_should_throttle_delayed_refs(trans,
- root)) {
+ fs_info))
should_throttle = 1;
- }
}
}
@@ -4640,7 +4661,9 @@ delete:
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
- ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+ ret = btrfs_run_delayed_refs(trans,
+ fs_info,
+ updates * 2);
if (ret && !err)
err = ret;
}
@@ -4675,7 +4698,8 @@ error:
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
- ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+ ret = btrfs_run_delayed_refs(trans, fs_info,
+ updates * 2);
if (ret && !err)
err = ret;
}
@@ -4697,13 +4721,13 @@ error:
int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
int front)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct address_space *mapping = inode->i_mapping;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
char *kaddr;
- u32 blocksize = root->sectorsize;
+ u32 blocksize = fs_info->sectorsize;
pgoff_t index = from >> PAGE_SHIFT;
unsigned offset = from & (blocksize - 1);
struct page *page;
@@ -4807,6 +4831,7 @@ out:
static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
u64 offset, u64 len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
int ret;
@@ -4814,8 +4839,8 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
* Still need to make sure the inode looks like it's been updated so
* that any holes get logged if we fsync.
*/
- if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
- BTRFS_I(inode)->last_trans = root->fs_info->generation;
+ if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ BTRFS_I(inode)->last_trans = fs_info->generation;
BTRFS_I(inode)->last_sub_trans = root->log_transid;
BTRFS_I(inode)->last_log_commit = root->last_log_commit;
return 0;
@@ -4833,7 +4858,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
if (ret) {
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -4843,7 +4868,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
btrfs_abort_transaction(trans, ret);
else
btrfs_update_inode(trans, root, inode);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -4855,13 +4880,14 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
*/
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- u64 hole_start = ALIGN(oldsize, root->sectorsize);
- u64 block_end = ALIGN(size, root->sectorsize);
+ u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
+ u64 block_end = ALIGN(size, fs_info->sectorsize);
u64 last_byte;
u64 cur_offset;
u64 hole_size;
@@ -4904,7 +4930,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
break;
}
last_byte = min(extent_map_end(em), block_end);
- last_byte = ALIGN(last_byte , root->sectorsize);
+ last_byte = ALIGN(last_byte, fs_info->sectorsize);
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
struct extent_map *hole_em;
hole_size = last_byte - cur_offset;
@@ -4929,9 +4955,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
- hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+ hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
- hole_em->generation = root->fs_info->generation;
+ hole_em->generation = fs_info->generation;
while (1) {
write_lock(&em_tree->lock);
@@ -5006,7 +5032,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
pagecache_isize_extended(inode, oldsize, newsize);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_write_no_snapshoting(root);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
} else {
/*
@@ -5037,7 +5063,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
* will be consistent.
*/
ret = btrfs_orphan_add(trans, inode);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret)
return ret;
@@ -5068,7 +5094,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
err = btrfs_orphan_del(trans, inode);
if (err)
btrfs_abort_transaction(trans, err);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
}
}
@@ -5201,6 +5227,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
void btrfs_evict_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
@@ -5215,7 +5242,7 @@ void btrfs_evict_inode(struct inode *inode)
return;
}
- min_size = btrfs_calc_trunc_metadata_size(root, 1);
+ min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
evict_inode_truncate_pages(inode);
@@ -5235,7 +5262,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_free_io_failure_record(inode, 0, (u64)-1);
- if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags));
goto no_delete;
@@ -5253,14 +5280,14 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
rsv->size = min_size;
rsv->failfast = 1;
- global_rsv = &root->fs_info->global_block_rsv;
+ global_rsv = &fs_info->global_block_rsv;
btrfs_i_size_write(inode, 0);
@@ -5294,18 +5321,18 @@ void btrfs_evict_inode(struct inode *inode)
* steal_from_global == 3: abandon all hope!
*/
if (steal_from_global > 2) {
- btrfs_warn(root->fs_info,
- "Could not get space for a delete, will truncate on mount %d",
- ret);
+ btrfs_warn(fs_info,
+ "Could not get space for a delete, will truncate on mount %d",
+ ret);
btrfs_orphan_del(NULL, inode);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
@@ -5315,7 +5342,7 @@ void btrfs_evict_inode(struct inode *inode)
* again.
*/
if (steal_from_global) {
- if (!btrfs_check_space_for_delayed_refs(trans, root))
+ if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
ret = btrfs_block_rsv_migrate(global_rsv, rsv,
min_size, 0);
else
@@ -5328,10 +5355,10 @@ void btrfs_evict_inode(struct inode *inode)
* again.
*/
if (ret) {
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret) {
btrfs_orphan_del(NULL, inode);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
continue;
@@ -5345,13 +5372,13 @@ void btrfs_evict_inode(struct inode *inode)
if (ret != -ENOSPC && ret != -EAGAIN)
break;
- trans->block_rsv = &root->fs_info->trans_block_rsv;
- btrfs_end_transaction(trans, root);
+ trans->block_rsv = &fs_info->trans_block_rsv;
+ btrfs_end_transaction(trans);
trans = NULL;
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
}
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
/*
* Errors here aren't a big deal, it just means we leave orphan items
@@ -5364,13 +5391,13 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_orphan_del(NULL, inode);
}
- trans->block_rsv = &root->fs_info->trans_block_rsv;
- if (!(root == root->fs_info->tree_root ||
+ trans->block_rsv = &fs_info->trans_block_rsv;
+ if (!(root == fs_info->tree_root ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
btrfs_return_ino(root, btrfs_ino(inode));
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
no_delete:
btrfs_remove_delayed_node(inode);
clear_inode(inode);
@@ -5416,7 +5443,7 @@ out_err:
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
-static int fixup_tree_root_location(struct btrfs_root *root,
+static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
struct inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
@@ -5441,8 +5468,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
key.type = BTRFS_ROOT_REF_KEY;
key.offset = location->objectid;
- ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
- 0, 0);
+ ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret) {
if (ret < 0)
err = ret;
@@ -5463,7 +5489,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
btrfs_release_path(path);
- new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
+ new_root = btrfs_read_fs_root_no_name(fs_info, location);
if (IS_ERR(new_root)) {
err = PTR_ERR(new_root);
goto out;
@@ -5517,6 +5543,7 @@ static void inode_tree_add(struct inode *inode)
static void inode_tree_del(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
int empty = 0;
@@ -5529,7 +5556,7 @@ static void inode_tree_del(struct inode *inode)
spin_unlock(&root->inode_lock);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
- synchronize_srcu(&root->fs_info->subvol_srcu);
+ synchronize_srcu(&fs_info->subvol_srcu);
spin_lock(&root->inode_lock);
empty = RB_EMPTY_ROOT(&root->inode_tree);
spin_unlock(&root->inode_lock);
@@ -5540,13 +5567,14 @@ static void inode_tree_del(struct inode *inode)
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
struct rb_node *prev;
struct btrfs_inode *entry;
struct inode *inode;
u64 objectid = 0;
- if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+ if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
spin_lock(&root->inode_lock);
@@ -5694,6 +5722,7 @@ static struct inode *new_simple_dir(struct super_block *s,
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
@@ -5718,8 +5747,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
- index = srcu_read_lock(&root->fs_info->subvol_srcu);
- ret = fixup_tree_root_location(root, dir, dentry,
+ index = srcu_read_lock(&fs_info->subvol_srcu);
+ ret = fixup_tree_root_location(fs_info, dir, dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
@@ -5729,13 +5758,13 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
} else {
inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
}
- srcu_read_unlock(&root->fs_info->subvol_srcu, index);
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
if (!IS_ERR(inode) && root != sub_root) {
- down_read(&root->fs_info->cleanup_work_sem);
+ down_read(&fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
ret = btrfs_orphan_cleanup(sub_root);
- up_read(&root->fs_info->cleanup_work_sem);
+ up_read(&fs_info->cleanup_work_sem);
if (ret) {
iput(inode);
inode = ERR_PTR(ret);
@@ -5792,6 +5821,7 @@ unsigned char btrfs_filetype_table[] = {
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
@@ -5805,20 +5835,11 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
int slot;
unsigned char d_type;
int over = 0;
- u32 di_cur;
- u32 di_total;
- u32 di_len;
- int key_type = BTRFS_DIR_INDEX_KEY;
char tmp_name[32];
char *name_ptr;
int name_len;
- int is_curr = 0; /* ctx->pos points to the current index? */
- bool emitted;
bool put = false;
-
- /* FIXME, use a real flag for deciding about the key type */
- if (root->fs_info->tree_root == root)
- key_type = BTRFS_DIR_ITEM_KEY;
+ struct btrfs_key location;
if (!dir_emit_dots(file, ctx))
return 0;
@@ -5829,14 +5850,11 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
path->reada = READA_FORWARD;
- if (key_type == BTRFS_DIR_INDEX_KEY) {
- INIT_LIST_HEAD(&ins_list);
- INIT_LIST_HEAD(&del_list);
- put = btrfs_readdir_get_delayed_items(inode, &ins_list,
- &del_list);
- }
+ INIT_LIST_HEAD(&ins_list);
+ INIT_LIST_HEAD(&del_list);
+ put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
- key.type = key_type;
+ key.type = BTRFS_DIR_INDEX_KEY;
key.offset = ctx->pos;
key.objectid = btrfs_ino(inode);
@@ -5844,7 +5862,6 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
if (ret < 0)
goto err;
- emitted = false;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
@@ -5862,98 +5879,52 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
if (found_key.objectid != key.objectid)
break;
- if (found_key.type != key_type)
+ if (found_key.type != BTRFS_DIR_INDEX_KEY)
break;
if (found_key.offset < ctx->pos)
goto next;
- if (key_type == BTRFS_DIR_INDEX_KEY &&
- btrfs_should_delete_dir_index(&del_list,
- found_key.offset))
+ if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
goto next;
ctx->pos = found_key.offset;
- is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
- di_cur = 0;
- di_total = btrfs_item_size(leaf, item);
-
- while (di_cur < di_total) {
- struct btrfs_key location;
-
- if (verify_dir_item(root, leaf, di))
- break;
+ if (verify_dir_item(fs_info, leaf, di))
+ goto next;
- name_len = btrfs_dir_name_len(leaf, di);
- if (name_len <= sizeof(tmp_name)) {
- name_ptr = tmp_name;
- } else {
- name_ptr = kmalloc(name_len, GFP_KERNEL);
- if (!name_ptr) {
- ret = -ENOMEM;
- goto err;
- }
+ name_len = btrfs_dir_name_len(leaf, di);
+ if (name_len <= sizeof(tmp_name)) {
+ name_ptr = tmp_name;
+ } else {
+ name_ptr = kmalloc(name_len, GFP_KERNEL);
+ if (!name_ptr) {
+ ret = -ENOMEM;
+ goto err;
}
- read_extent_buffer(leaf, name_ptr,
- (unsigned long)(di + 1), name_len);
-
- d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
- btrfs_dir_item_key_to_cpu(leaf, di, &location);
+ }
+ read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
+ name_len);
+ d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
+ btrfs_dir_item_key_to_cpu(leaf, di, &location);
- /* is this a reference to our own snapshot? If so
- * skip it.
- *
- * In contrast to old kernels, we insert the snapshot's
- * dir item and dir index after it has been created, so
- * we won't find a reference to our own snapshot. We
- * still keep the following code for backward
- * compatibility.
- */
- if (location.type == BTRFS_ROOT_ITEM_KEY &&
- location.objectid == root->root_key.objectid) {
- over = 0;
- goto skip;
- }
- over = !dir_emit(ctx, name_ptr, name_len,
- location.objectid, d_type);
+ over = !dir_emit(ctx, name_ptr, name_len, location.objectid,
+ d_type);
-skip:
- if (name_ptr != tmp_name)
- kfree(name_ptr);
+ if (name_ptr != tmp_name)
+ kfree(name_ptr);
- if (over)
- goto nopos;
- emitted = true;
- di_len = btrfs_dir_name_len(leaf, di) +
- btrfs_dir_data_len(leaf, di) + sizeof(*di);
- di_cur += di_len;
- di = (struct btrfs_dir_item *)((char *)di + di_len);
- }
+ if (over)
+ goto nopos;
+ ctx->pos++;
next:
path->slots[0]++;
}
- if (key_type == BTRFS_DIR_INDEX_KEY) {
- if (is_curr)
- ctx->pos++;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
- if (ret)
- goto nopos;
- }
-
- /*
- * If we haven't emitted any dir entry, we must not touch ctx->pos as
- * it was set to the termination value in previous call. We assume
- * that "." and ".." were emitted if we reach this point and set the
- * termination value as well for an empty directory.
- */
- if (ctx->pos > 2 && !emitted)
+ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+ if (ret)
goto nopos;
- /* Reached end of directory/root. Bump pos past the last item. */
- ctx->pos++;
-
/*
* Stop new entries from being returned after we return the last
* entry.
@@ -5971,12 +5942,10 @@ next:
* last entry requires it because doing so has broken 32bit apps
* in the past.
*/
- if (key_type == BTRFS_DIR_INDEX_KEY) {
- if (ctx->pos >= INT_MAX)
- ctx->pos = LLONG_MAX;
- else
- ctx->pos = INT_MAX;
- }
+ if (ctx->pos >= INT_MAX)
+ ctx->pos = LLONG_MAX;
+ else
+ ctx->pos = INT_MAX;
nopos:
ret = 0;
err:
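
The readdir rewrite above drops the per-key-type branches and ends with a single clamp on ctx->pos; as the surviving comment explains, positions past the last entry are parked at INT_MAX so 32-bit applications are not handed an offset they cannot represent, with LLONG_MAX as the final sentinel. A toy model of that clamp (illustrative names only):

#include <limits.h>
#include <stdio.h>

/* Park the directory position after the last real entry: INT_MAX for
 * 32-bit callers, LLONG_MAX once even INT_MAX has been consumed. */
static long long park_dir_pos(long long pos)
{
	if (pos >= INT_MAX)
		return LLONG_MAX;
	return INT_MAX;
}

int main(void)
{
	printf("%lld\n", park_dir_pos(42));      /* -> INT_MAX */
	printf("%lld\n", park_dir_pos(INT_MAX)); /* -> LLONG_MAX */
	return 0;
}
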
@@ -6006,7 +5975,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
}
return ret;
}
@@ -6019,6 +5988,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
*/
static int btrfs_dirty_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
@@ -6033,16 +6003,16 @@ static int btrfs_dirty_inode(struct inode *inode)
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
}
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (BTRFS_I(inode)->delayed_node)
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
return ret;
}
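
btrfs_dirty_inode() above keeps its two-step strategy: try the cheap joined transaction first and fall back to a fully reserved one only on -ENOSPC (the `ret && ret == -ENOSPC` test is redundant, since ret == -ENOSPC already implies ret is nonzero). A hedged sketch of the retry shape, with hypothetical stand-in functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins: a cheap attempt that runs out of space,
 * and a fully reserved retry that succeeds. */
static int try_cheap(void)    { return -ENOSPC; }
static int try_reserved(void) { return 0; }

static int update_with_fallback(void)
{
	int ret = try_cheap();

	if (ret == -ENOSPC)
		ret = try_reserved();
	return ret;
}

int main(void)
{
	printf("ret=%d\n", update_with_fallback());
	return 0;
}
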
@@ -6168,6 +6138,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
u64 ref_objectid, u64 objectid,
umode_t mode, u64 *index)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
struct btrfs_inode_item *inode_item;
struct btrfs_key *location;
@@ -6183,7 +6154,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (!path)
return ERR_PTR(-ENOMEM);
- inode = new_inode(root->fs_info->sb);
+ inode = new_inode(fs_info->sb);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
@@ -6277,7 +6248,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
- memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
+ memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
sizeof(*inode_item));
fill_inode_item(trans, path->nodes[0], inode_item, inode);
@@ -6296,9 +6267,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
- if (btrfs_test_opt(root->fs_info, NODATASUM))
+ if (btrfs_test_opt(fs_info, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root->fs_info, NODATACOW))
+ if (btrfs_test_opt(fs_info, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
}
@@ -6312,7 +6283,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
ret = btrfs_inode_inherit_props(trans, inode, dir);
if (ret)
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"error inheriting props for ino %llu (root %llu): %d",
btrfs_ino(inode), root->root_key.objectid, ret);
@@ -6343,6 +6314,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
@@ -6358,9 +6330,9 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
- key.objectid, root->root_key.objectid,
- parent_ino, index, name, name_len);
+ ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
+ root->root_key.objectid, parent_ino,
+ index, name, name_len);
} else if (add_backref) {
ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
parent_ino, index);
@@ -6394,9 +6366,9 @@ fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int err;
- err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
- key.objectid, root->root_key.objectid,
- parent_ino, &local_index, name, name_len);
+ err = btrfs_del_root_ref(trans, fs_info, key.objectid,
+ root->root_key.objectid, parent_ino,
+ &local_index, name, name_len);
} else if (add_backref) {
u64 local_index;
@@ -6423,6 +6395,7 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
@@ -6475,9 +6448,9 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
}
out_unlock:
- btrfs_end_transaction(trans, root);
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
@@ -6494,6 +6467,7 @@ out_unlock_inode:
static int btrfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
@@ -6550,13 +6524,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
out_unlock:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (err && drop_inode_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
return err;
out_unlock_inode:
@@ -6571,6 +6545,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 index;
int err;
int drop_inode = 0;
@@ -6628,20 +6603,21 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_log_new_name(trans, inode, NULL, parent);
}
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
fail:
if (trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -6699,13 +6675,13 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
drop_on_err = 0;
out_fail:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (drop_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
return err;
out_fail_inode:
@@ -6820,6 +6796,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
int err = 0;
u64 extent_start = 0;
@@ -6841,7 +6818,7 @@ again:
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
if (em) {
@@ -6857,7 +6834,7 @@ again:
err = -ENOMEM;
goto out;
}
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
@@ -6916,7 +6893,8 @@ again:
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
- extent_end = ALIGN(extent_start + size, root->sectorsize);
+ extent_end = ALIGN(extent_start + size,
+ fs_info->sectorsize);
}
next:
if (start >= extent_end) {
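
ALIGN(extent_start + size, fs_info->sectorsize) above rounds the inline extent's end up to the next sector boundary. For a power-of-two alignment, which btrfs sector sizes are, this is the usual mask trick; a standalone sketch:

#include <stdio.h>

/* Round x up to a multiple of a, where a is a power of two. */
static unsigned long long align_up(unsigned long long x,
				   unsigned long long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	printf("%llu\n", align_up(5000, 4096)); /* 8192 */
	return 0;
}
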
@@ -6965,7 +6943,7 @@ next:
copy_size = min_t(u64, PAGE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
- em->len = ALIGN(copy_size, root->sectorsize);
+ em->len = ALIGN(copy_size, fs_info->sectorsize);
em->orig_block_len = em->len;
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
@@ -7024,7 +7002,7 @@ not_found_em:
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
err = -EIO;
@@ -7049,11 +7027,11 @@ insert:
* extent causing the -EEXIST.
*/
if (existing->start == em->start &&
- extent_map_end(existing) == extent_map_end(em) &&
+ extent_map_end(existing) >= extent_map_end(em) &&
em->block_start == existing->block_start) {
/*
- * these two extents are the same, it happens
- * with inlines especially
+ * The existing extent map already encompasses the
+ * entire extent map we tried to add.
*/
free_extent_map(em);
em = existing;
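
This hunk relaxes the -EEXIST handling: rather than insisting the existing mapping end exactly where the new one does, it accepts any mapping with the same start and block_start that extends at least as far, per the rewritten comment. A toy containment check (block_start matching elided, plain integer intervals instead of extent maps):

#include <stdbool.h>
#include <stdio.h>

struct map { unsigned long long start, end; };	/* [start, end) */

/* Reuse "existing" when it fully covers "incoming": same start,
 * extends at least as far. */
static bool covers(const struct map *existing, const struct map *incoming)
{
	return existing->start == incoming->start &&
	       existing->end >= incoming->end;
}

int main(void)
{
	struct map ex = { 0, 8192 }, in = { 0, 4096 };

	printf("%s\n", covers(&ex, &in) ? "reuse existing" : "insert new");
	return 0;
}
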
@@ -7085,7 +7063,7 @@ out:
btrfs_free_path(path);
if (trans) {
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
if (!err)
err = ret;
}
@@ -7264,6 +7242,7 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map *em;
struct btrfs_key ins;
@@ -7271,17 +7250,18 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
int ret;
alloc_hint = get_extent_allocation_hint(inode, start, len);
- ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
- alloc_hint, &ins, 1, 1);
+ ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 0, alloc_hint, &ins, 1, 1);
if (ret)
return ERR_PTR(ret);
em = btrfs_create_dio_extent(inode, start, ins.offset, start,
ins.objectid, ins.offset, ins.offset,
ins.offset, 0);
- btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (IS_ERR(em))
- btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+ btrfs_free_reserved_extent(fs_info, ins.objectid,
+ ins.offset, 1);
return em;
}
@@ -7294,6 +7274,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
int ret;
@@ -7374,14 +7355,15 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
}
- if (btrfs_extent_readonly(root, disk_bytenr))
+ if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out;
num_bytes = min(offset + *len, extent_end) - offset;
if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
u64 range_end;
- range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
+ range_end = round_up(offset + num_bytes,
+ root->fs_info->sectorsize) - 1;
ret = test_range_bit(io_tree, offset, range_end,
EXTENT_DELALLOC, 0, NULL);
if (ret) {
@@ -7404,7 +7386,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
key.offset - backref_offset, disk_bytenr);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret) {
ret = 0;
goto out;
@@ -7418,8 +7400,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
- if (csum_exist_in_range(root, disk_bytenr, num_bytes))
- goto out;
+ if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
+ goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
@@ -7653,8 +7635,8 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_dio_data *dio_data = NULL;
u64 start = iblock << inode->i_blkbits;
@@ -7666,7 +7648,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (create)
unlock_bits |= EXTENT_DIRTY;
else
- len = min_t(u64, len, root->sectorsize);
+ len = min_t(u64, len, fs_info->sectorsize);
lockstart = start;
lockend = start + len - 1;
@@ -7755,14 +7737,14 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (can_nocow_extent(inode, start, &len, &orig_start,
&orig_block_len, &ram_bytes) == 1 &&
- btrfs_inc_nocow_writers(root->fs_info, block_start)) {
+ btrfs_inc_nocow_writers(fs_info, block_start)) {
struct extent_map *em2;
em2 = btrfs_create_dio_extent(inode, start, len,
orig_start, block_start,
len, orig_block_len,
ram_bytes, type);
- btrfs_dec_nocow_writers(root->fs_info, block_start);
+ btrfs_dec_nocow_writers(fs_info, block_start);
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = em2;
@@ -7855,19 +7837,18 @@ err:
static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
int mirror_num)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
BUG_ON(bio_op(bio) == REQ_OP_WRITE);
bio_get(bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, bio,
- BTRFS_WQ_ENDIO_DIO_REPAIR);
+ ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
if (ret)
goto err;
- ret = btrfs_map_bio(root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
err:
bio_put(bio);
return ret;
@@ -7935,7 +7916,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
if ((failed_bio->bi_vcnt > 1)
|| (failed_bio->bi_io_vec->bv_len
- > BTRFS_I(inode)->root->sectorsize))
+ > btrfs_inode_sectorsize(inode)))
read_mode |= REQ_FAILFAST_DEV;
isector = start - btrfs_io_bio(failed_bio)->logical;
@@ -7980,7 +7961,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
ASSERT(bio->bi_vcnt == 1);
inode = bio->bi_io_vec->bv_page->mapping->host;
- ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+ ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
done->uptodate = 1;
bio_for_each_segment_all(bvec, bio, i)
@@ -8004,7 +7985,7 @@ static int __btrfs_correct_data_nocsum(struct inode *inode,
int ret;
fs_info = BTRFS_I(inode)->root->fs_info;
- sectorsize = BTRFS_I(inode)->root->sectorsize;
+ sectorsize = fs_info->sectorsize;
start = io_bio->logical;
done.inode = inode;
@@ -8063,7 +8044,7 @@ static void btrfs_retry_endio(struct bio *bio)
ASSERT(bio->bi_vcnt == 1);
inode = bio->bi_io_vec->bv_page->mapping->host;
- ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+ ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
bio_for_each_segment_all(bvec, bio, i) {
ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8098,7 +8079,7 @@ static int __btrfs_subio_endio_read(struct inode *inode,
int ret;
fs_info = BTRFS_I(inode)->root->fs_info;
- sectorsize = BTRFS_I(inode)->root->sectorsize;
+ sectorsize = fs_info->sectorsize;
err = 0;
start = io_bio->logical;
@@ -8195,7 +8176,7 @@ static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
const u64 bytes,
const int uptodate)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered = NULL;
u64 ordered_offset = offset;
u64 ordered_bytes = bytes;
@@ -8211,8 +8192,7 @@ again:
btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
finish_ordered_fn, NULL, NULL);
- btrfs_queue_work(root->fs_info->endio_write_workers,
- &ordered->work);
+ btrfs_queue_work(fs_info->endio_write_workers, &ordered->work);
out_test:
/*
* our bio might span multiple ordered extents. If we haven't
@@ -8247,8 +8227,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
unsigned long bio_flags, u64 offset)
{
int ret;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
+ ret = btrfs_csum_one_bio(inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
@@ -8302,8 +8281,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
return bio;
}
-static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
- struct inode *inode,
+static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
struct btrfs_dio_private *dip,
struct bio *bio,
u64 file_offset)
@@ -8318,7 +8296,7 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
* contention.
*/
if (dip->logical_offset == file_offset) {
- ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
+ ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
file_offset);
if (ret)
return ret;
@@ -8338,9 +8316,9 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
u64 file_offset, int skip_sum,
int async_submit)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_private *dip = bio->bi_private;
bool write = bio_op(bio) == REQ_OP_WRITE;
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
if (async_submit)
@@ -8349,8 +8327,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
bio_get(bio);
if (!write) {
- ret = btrfs_bio_wq_end_io(root->fs_info, bio,
- BTRFS_WQ_ENDIO_DATA);
+ ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
if (ret)
goto err;
}
@@ -8359,27 +8336,27 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
goto map;
if (write && async_submit) {
- ret = btrfs_wq_submit_bio(root->fs_info,
- inode, bio, 0, 0, file_offset,
- __btrfs_submit_bio_start_direct_io,
- __btrfs_submit_bio_done);
+ ret = btrfs_wq_submit_bio(fs_info, inode, bio, 0, 0,
+ file_offset,
+ __btrfs_submit_bio_start_direct_io,
+ __btrfs_submit_bio_done);
goto err;
} else if (write) {
/*
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
- ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+ ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
if (ret)
goto err;
} else {
- ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
+ ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
file_offset);
if (ret)
goto err;
}
map:
- ret = btrfs_map_bio(root, bio, 0, async_submit);
+ ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
@@ -8389,23 +8366,24 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
int skip_sum)
{
struct inode *inode = dip->inode;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct bio *bio;
struct bio *orig_bio = dip->orig_bio;
- struct bio_vec *bvec = orig_bio->bi_io_vec;
+ struct bio_vec *bvec;
u64 start_sector = orig_bio->bi_iter.bi_sector;
u64 file_offset = dip->logical_offset;
u64 submit_len = 0;
u64 map_length;
- u32 blocksize = root->sectorsize;
+ u32 blocksize = fs_info->sectorsize;
int async_submit = 0;
int nr_sectors;
int ret;
- int i;
+ int i, j;
map_length = orig_bio->bi_iter.bi_size;
- ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
- start_sector << 9, &map_length, NULL, 0);
+ ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
+ &map_length, NULL, 0);
if (ret)
return -EIO;
@@ -8431,8 +8409,8 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
btrfs_io_bio(bio)->logical = file_offset;
atomic_inc(&dip->pending_bios);
- while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
- nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+ bio_for_each_segment_all(bvec, orig_bio, j) {
+ nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
i = 0;
next_block:
if (unlikely(map_length < submit_len + blocksize ||
@@ -8469,7 +8447,7 @@ next_block:
btrfs_io_bio(bio)->logical = file_offset;
map_length = orig_bio->bi_iter.bi_size;
- ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
+ ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
start_sector << 9,
&map_length, NULL, 0);
if (ret) {
@@ -8484,7 +8462,6 @@ next_block:
i++;
goto next_block;
}
- bvec++;
}
}
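
The loop above trades the hand-rolled bio_vec walk — `while (bvec <= orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)` with a trailing `bvec++` — for the bio_for_each_segment_all() iterator. The generic shape of that change, with a toy macro that is not the real kernel definition:

#include <stdio.h>

struct vec { int len; };

/* Toy analogue of an element iterator macro: walks cnt entries of
 * arr, exposing each via v and its index via i. */
#define for_each_vec(v, arr, i, cnt) \
	for ((i) = 0, (v) = (arr); (i) < (cnt); (i)++, (v)++)

int main(void)
{
	struct vec vecs[3] = { {512}, {1024}, {2048} }, *v;
	int i, total = 0;

	/* before: while (v <= vecs + 2) { total += v->len; v++; } */
	for_each_vec(v, vecs, i, 3)
		total += v->len;

	printf("total=%d\n", total);
	return 0;
}
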
@@ -8616,12 +8593,13 @@ free_ordered:
kfree(dip);
}
-static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
- const struct iov_iter *iter, loff_t offset)
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+ struct kiocb *iocb,
+ const struct iov_iter *iter, loff_t offset)
{
int seg;
int i;
- unsigned blocksize_mask = root->sectorsize - 1;
+ unsigned int blocksize_mask = fs_info->sectorsize - 1;
ssize_t retval = -EINVAL;
if (offset & blocksize_mask)
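
check_direct_IO() builds blocksize_mask from fs_info->sectorsize and rejects direct I/O at offsets that are not sector-aligned. A minimal sketch of the mask test, assuming the sector size is a power of two as btrfs requires:

#include <stdbool.h>
#include <stdio.h>

/* For power-of-two sizes, (x & (size - 1)) != 0 iff x is misaligned. */
static bool misaligned(unsigned long long off, unsigned int sectorsize)
{
	unsigned int mask = sectorsize - 1;

	return (off & mask) != 0;
}

int main(void)
{
	printf("%d %d\n", misaligned(4096, 4096), misaligned(4100, 4096));
	return 0;
}
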
@@ -8653,7 +8631,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_data dio_data = { 0 };
loff_t offset = iocb->ki_pos;
size_t count = 0;
@@ -8662,7 +8640,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
bool relock = false;
ssize_t ret;
- if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
+ if (check_direct_IO(fs_info, iocb, iter, offset))
return 0;
inode_dio_begin(inode);
@@ -8702,7 +8680,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* do the accounting properly if we go over the number we
* originally calculated. Abuse current->journal_info for this.
*/
- dio_data.reserve = round_up(count, root->sectorsize);
+ dio_data.reserve = round_up(count,
+ fs_info->sectorsize);
dio_data.unsubmitted_oe_range_start = (u64)offset;
dio_data.unsubmitted_oe_range_end = (u64)offset;
current->journal_info = &dio_data;
@@ -8714,7 +8693,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
ret = __blockdev_direct_IO(iocb, inode,
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+ fs_info->fs_devices->latest_bdev,
iter, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
@@ -8973,7 +8952,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vma->vm_file);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
@@ -9048,7 +9027,8 @@ again:
}
if (page->index == ((size - 1) >> PAGE_SHIFT)) {
- reserved_space = round_up(size - page_start, root->sectorsize);
+ reserved_space = round_up(size - page_start,
+ fs_info->sectorsize);
if (reserved_space < PAGE_SIZE) {
end = page_start + reserved_space - 1;
spin_lock(&BTRFS_I(inode)->lock);
@@ -9097,7 +9077,7 @@ again:
set_page_dirty(page);
SetPageUptodate(page);
- BTRFS_I(inode)->last_trans = root->fs_info->generation;
+ BTRFS_I(inode)->last_trans = fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
@@ -9118,13 +9098,14 @@ out_noreserve:
static int btrfs_truncate(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
int ret = 0;
int err = 0;
struct btrfs_trans_handle *trans;
- u64 mask = root->sectorsize - 1;
- u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+ u64 mask = fs_info->sectorsize - 1;
+ u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
(u64)-1);
@@ -9167,7 +9148,7 @@ static int btrfs_truncate(struct inode *inode)
* 3) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
@@ -9184,7 +9165,7 @@ static int btrfs_truncate(struct inode *inode)
}
/* Migrate the slack space for the truncate to our reserve */
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
min_size, 0);
BUG_ON(ret);
@@ -9207,15 +9188,15 @@ static int btrfs_truncate(struct inode *inode)
break;
}
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
err = ret;
break;
}
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
@@ -9224,7 +9205,7 @@ static int btrfs_truncate(struct inode *inode)
break;
}
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
@@ -9238,16 +9219,16 @@ static int btrfs_truncate(struct inode *inode)
}
if (trans) {
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret && !err)
err = ret;
- ret = btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ ret = btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
}
out:
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
if (ret && !err)
err = ret;
@@ -9363,6 +9344,7 @@ static void btrfs_i_callback(struct rcu_head *head)
void btrfs_destroy_inode(struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -9384,8 +9366,8 @@ void btrfs_destroy_inode(struct inode *inode)
if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags)) {
- btrfs_info(root->fs_info, "inode %llu still on the orphan list",
- btrfs_ino(inode));
+ btrfs_info(fs_info, "inode %llu still on the orphan list",
+ btrfs_ino(inode));
atomic_dec(&root->orphan_inodes);
}
@@ -9394,7 +9376,7 @@ void btrfs_destroy_inode(struct inode *inode)
if (!ordered)
break;
else {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"found ordered extent %llu %llu on inode cleanup",
ordered->file_offset, ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
@@ -9506,6 +9488,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
struct inode *new_dir,
struct dentry *new_dentry)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
@@ -9528,9 +9511,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* close the race window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
- down_read(&root->fs_info->subvol_sem);
+ down_read(&fs_info->subvol_sem);
if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
- down_read(&dest->fs_info->subvol_sem);
+ down_read(&fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
@@ -9563,7 +9546,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* Reference for the source. */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
/* force full log commit if subvolume involved. */
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
} else {
btrfs_pin_log_trans(root);
root_log_pinned = true;
@@ -9579,7 +9562,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* And now for the dest. */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
/* force full log commit if subvolume involved. */
- btrfs_set_log_full_commit(dest->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
} else {
btrfs_pin_log_trans(dest);
dest_log_pinned = true;
@@ -9693,12 +9676,12 @@ out_fail:
* allow the tasks to sync it.
*/
if (ret && (root_log_pinned || dest_log_pinned)) {
- if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
- btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
- btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
+ if (btrfs_inode_in_log(old_dir, fs_info->generation) ||
+ btrfs_inode_in_log(new_dir, fs_info->generation) ||
+ btrfs_inode_in_log(old_inode, fs_info->generation) ||
(new_inode &&
- btrfs_inode_in_log(new_inode, root->fs_info->generation)))
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_inode_in_log(new_inode, fs_info->generation)))
+ btrfs_set_log_full_commit(fs_info, trans);
if (root_log_pinned) {
btrfs_end_log_trans(root);
@@ -9709,12 +9692,12 @@ out_fail:
dest_log_pinned = false;
}
}
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
out_notrans:
if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
- up_read(&dest->fs_info->subvol_sem);
+ up_read(&fs_info->subvol_sem);
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
- up_read(&root->fs_info->subvol_sem);
+ up_read(&fs_info->subvol_sem);
return ret;
}
@@ -9774,6 +9757,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
struct btrfs_trans_handle *trans;
unsigned int trans_num_items;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
@@ -9830,7 +9814,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* close the racy window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
- down_read(&root->fs_info->subvol_sem);
+ down_read(&fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
* both inodes are subvols and we need to unlink them then that would
@@ -9861,7 +9845,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
BTRFS_I(old_inode)->dir_index = 0ULL;
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
/* force full log commit if subvolume involved. */
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
} else {
btrfs_pin_log_trans(root);
log_pinned = true;
@@ -9968,20 +9952,20 @@ out_fail:
* allow the tasks to sync it.
*/
if (ret && log_pinned) {
- if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
- btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
- btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
+ if (btrfs_inode_in_log(old_dir, fs_info->generation) ||
+ btrfs_inode_in_log(new_dir, fs_info->generation) ||
+ btrfs_inode_in_log(old_inode, fs_info->generation) ||
(new_inode &&
- btrfs_inode_in_log(new_inode, root->fs_info->generation)))
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_inode_in_log(new_inode, fs_info->generation)))
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_end_log_trans(root);
log_pinned = false;
}
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
- up_read(&root->fs_info->subvol_sem);
+ up_read(&fs_info->subvol_sem);
return ret;
}
@@ -10116,9 +10100,10 @@ out:
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
ret = __start_delalloc_inodes(root, delay_iput, -1);
@@ -10129,14 +10114,14 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
- atomic_inc(&root->fs_info->async_submit_draining);
- while (atomic_read(&root->fs_info->nr_async_submits) ||
- atomic_read(&root->fs_info->async_delalloc_pages)) {
- wait_event(root->fs_info->async_submit_wait,
- (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
- atomic_read(&root->fs_info->async_delalloc_pages) == 0));
- }
- atomic_dec(&root->fs_info->async_submit_draining);
+ atomic_inc(&fs_info->async_submit_draining);
+ while (atomic_read(&fs_info->nr_async_submits) ||
+ atomic_read(&fs_info->async_delalloc_pages)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->nr_async_submits) == 0 &&
+ atomic_read(&fs_info->async_delalloc_pages) == 0));
+ }
+ atomic_dec(&fs_info->async_submit_draining);
return ret;
}
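
btrfs_start_delalloc_inodes() above raises async_submit_draining and then waits for both in-flight counters to reach zero before returning. A C11-atomics sketch of the drain predicate only — the kernel sleeps on async_submit_wait via wait_event() rather than polling:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy counterparts of the kernel's atomic_t counters. */
static atomic_int nr_async_submits;
static atomic_int async_delalloc_pages;

static bool drained(void)
{
	return atomic_load(&nr_async_submits) == 0 &&
	       atomic_load(&async_delalloc_pages) == 0;
}

int main(void)
{
	atomic_store(&nr_async_submits, 0);
	atomic_store(&async_delalloc_pages, 0);
	printf("drained=%d\n", drained());
	return 0;
}
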
@@ -10199,6 +10184,7 @@ out:
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
@@ -10215,7 +10201,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
struct extent_buffer *leaf;
name_len = strlen(symname);
- if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
+ if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
return -ENAMETOOLONG;
/*
@@ -10309,12 +10295,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
out_unlock:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
return err;
out_unlock_inode:
@@ -10328,6 +10314,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -10364,10 +10351,10 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
min_size, 0, *alloc_hint, &ins, 1, 0);
if (ret) {
if (own_trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
break;
}
- btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
last_alloc = ins.offset;
ret = insert_reserved_file_extent(trans, inode,
@@ -10376,11 +10363,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
- btrfs_free_reserved_extent(root, ins.objectid,
+ btrfs_free_reserved_extent(fs_info, ins.objectid,
ins.offset, 0);
btrfs_abort_transaction(trans, ret);
if (own_trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
break;
}
@@ -10401,7 +10388,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ins.offset;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;
@@ -10440,12 +10427,12 @@ next:
if (ret) {
btrfs_abort_transaction(trans, ret);
if (own_trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
break;
}
if (own_trans)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
}
if (cur_offset < end)
btrfs_free_reserved_data_space(inode, cur_offset,
@@ -10493,6 +10480,7 @@ static int btrfs_permission(struct inode *inode, int mask)
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
@@ -10549,11 +10537,11 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
mark_inode_dirty(inode);
out:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (ret)
iput(inode);
- btrfs_balance_delayed_items(root);
- btrfs_btree_balance_dirty(root);
+ btrfs_balance_delayed_items(fs_info);
+ btrfs_btree_balance_dirty(fs_info);
return ret;
out_inode:
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7acbd2cf6192..0a6902555e65 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -33,7 +33,6 @@
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
@@ -216,6 +215,7 @@ static int check_flags(unsigned int flags)
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_inode *ip = BTRFS_I(inode);
struct btrfs_root *root = ip->root;
struct btrfs_trans_handle *trans;
@@ -325,7 +325,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip->flags |= BTRFS_INODE_COMPRESS;
ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
- if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+ if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
comp = "lzo";
else
comp = "zlib";
@@ -352,7 +352,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
inode->i_ctime = current_time(inode);
ret = btrfs_update_inode(trans, root, inode);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
out_drop:
if (ret) {
ip->flags = ip_oldflags;
@@ -374,7 +374,8 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_device *device;
struct request_queue *q;
struct fstrim_range range;
@@ -410,7 +411,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
- ret = btrfs_trim_fs(fs_info->tree_root, &range);
+ ret = btrfs_trim_fs(fs_info, &range);
if (ret < 0)
return ret;
@@ -437,6 +438,7 @@ static noinline int create_subvol(struct inode *dir,
u64 *async_transid,
struct btrfs_qgroup_inherit *inherit)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_key key;
struct btrfs_root_item *root_item;
@@ -459,7 +461,7 @@ static noinline int create_subvol(struct inode *dir,
if (!root_item)
return -ENOMEM;
- ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
+ ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
if (ret)
goto fail_free;
@@ -485,14 +487,14 @@ static noinline int create_subvol(struct inode *dir,
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- btrfs_subvolume_release_metadata(root, &block_rsv,
+ btrfs_subvolume_release_metadata(fs_info, &block_rsv,
qgroup_reserved);
goto fail_free;
}
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
- ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
if (ret)
goto fail;
@@ -502,24 +504,22 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
- memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, objectid);
- write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(),
- BTRFS_FSID_SIZE);
- write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
- btrfs_header_chunk_tree_uuid(leaf),
- BTRFS_UUID_SIZE);
+ write_extent_buffer_fsid(leaf, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
inode_item = &root_item->inode;
btrfs_set_stack_inode_generation(inode_item, 1);
btrfs_set_stack_inode_size(inode_item, 3);
btrfs_set_stack_inode_nlink(inode_item, 1);
- btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
+ btrfs_set_stack_inode_nbytes(inode_item,
+ fs_info->nodesize);
btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
btrfs_set_root_flags(root_item, 0);
@@ -552,13 +552,13 @@ static noinline int create_subvol(struct inode *dir,
key.objectid = objectid;
key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
- ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
+ ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
root_item);
if (ret)
goto fail;
key.offset = (u64)-1;
- new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
+ new_root = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
btrfs_abort_transaction(trans, ret);
@@ -599,14 +599,13 @@ static noinline int create_subvol(struct inode *dir,
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
+ ret = btrfs_add_root_ref(trans, fs_info,
objectid, root->root_key.objectid,
btrfs_ino(dir), index, name, namelen);
BUG_ON(ret);
- ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
- root_item->uuid, BTRFS_UUID_KEY_SUBVOL,
- objectid);
+ ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid,
+ BTRFS_UUID_KEY_SUBVOL, objectid);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -614,15 +613,15 @@ fail:
kfree(root_item);
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
- btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+ btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
if (async_transid) {
*async_transid = trans->transid;
- err = btrfs_commit_transaction_async(trans, root, 1);
+ err = btrfs_commit_transaction_async(trans, 1);
if (err)
- err = btrfs_commit_transaction(trans, root);
+ err = btrfs_commit_transaction(trans);
} else {
- err = btrfs_commit_transaction(trans, root);
+ err = btrfs_commit_transaction(trans);
}
if (err && !ret)
ret = err;
@@ -662,6 +661,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
u64 *async_transid, bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
@@ -721,19 +721,17 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto fail;
}
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
if (async_transid) {
*async_transid = trans->transid;
- ret = btrfs_commit_transaction_async(trans,
- root->fs_info->extent_root, 1);
+ ret = btrfs_commit_transaction_async(trans, 1);
if (ret)
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
} else {
- ret = btrfs_commit_transaction(trans,
- root->fs_info->extent_root);
+ ret = btrfs_commit_transaction(trans);
}
if (ret)
goto fail;
@@ -755,7 +753,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
d_instantiate(dentry, inode);
ret = 0;
fail:
- btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+ btrfs_subvolume_release_metadata(fs_info,
&pending_snapshot->block_rsv,
pending_snapshot->qgroup_reserved);
dec_and_free:
@@ -842,7 +840,8 @@ static noinline int btrfs_mksubvol(struct path *parent,
u64 *async_transid, bool readonly,
struct btrfs_qgroup_inherit *inherit)
{
- struct inode *dir = d_inode(parent->dentry);
+ struct inode *dir = d_inode(parent->dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct dentry *dentry;
int error;
@@ -869,7 +868,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
if (error)
goto out_dput;
- down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
+ down_read(&fs_info->subvol_sem);
if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
goto out_up_read;
@@ -884,7 +883,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
if (!error)
fsnotify_mkdir(dir, dentry);
out_up_read:
- up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
+ up_read(&fs_info->subvol_sem);
out_dput:
dput(dentry);
out_unlock:
@@ -1268,6 +1267,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_to_defrag)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct file_ra_state *ra = NULL;
unsigned long last_index;
@@ -1365,8 +1365,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!(inode->i_sb->s_flags & MS_ACTIVE))
break;
- if (btrfs_defrag_cancelled(root->fs_info)) {
- btrfs_debug(root->fs_info, "defrag_file cancelled");
+ if (btrfs_defrag_cancelled(fs_info)) {
+ btrfs_debug(fs_info, "defrag_file cancelled");
ret = -EAGAIN;
break;
}
@@ -1454,18 +1454,18 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
- atomic_inc(&root->fs_info->async_submit_draining);
- while (atomic_read(&root->fs_info->nr_async_submits) ||
- atomic_read(&root->fs_info->async_delalloc_pages)) {
- wait_event(root->fs_info->async_submit_wait,
- (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
- atomic_read(&root->fs_info->async_delalloc_pages) == 0));
+ atomic_inc(&fs_info->async_submit_draining);
+ while (atomic_read(&fs_info->nr_async_submits) ||
+ atomic_read(&fs_info->async_delalloc_pages)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->nr_async_submits) == 0 &&
+ atomic_read(&fs_info->async_delalloc_pages) == 0));
}
- atomic_dec(&root->fs_info->async_submit_draining);
+ atomic_dec(&fs_info->async_submit_draining);
}
if (range->compress_type == BTRFS_COMPRESS_LZO) {
- btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
+ btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
}
ret = defrag_count;
@@ -1485,10 +1485,12 @@ out_ra:
static noinline int btrfs_ioctl_resize(struct file *file,
void __user *arg)
{
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 new_size;
u64 old_size;
u64 devid = 1;
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
struct btrfs_device *device = NULL;
@@ -1505,13 +1507,12 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (ret)
return ret;
- if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
- 1)) {
+ if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
mnt_drop_write_file(file);
return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
}
- mutex_lock(&root->fs_info->volume_mutex);
+ mutex_lock(&fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
@@ -1533,19 +1534,19 @@ static noinline int btrfs_ioctl_resize(struct file *file,
ret = -EINVAL;
goto out_free;
}
- btrfs_info(root->fs_info, "resizing devid %llu", devid);
+ btrfs_info(fs_info, "resizing devid %llu", devid);
}
- device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
+ device = btrfs_find_device(fs_info, devid, NULL, NULL);
if (!device) {
- btrfs_info(root->fs_info, "resizer unable to find device %llu",
- devid);
+ btrfs_info(fs_info, "resizer unable to find device %llu",
+ devid);
ret = -ENODEV;
goto out_free;
}
if (!device->writeable) {
- btrfs_info(root->fs_info,
+ btrfs_info(fs_info,
"resizer unable to apply on readonly device %llu",
devid);
ret = -EPERM;
@@ -1599,11 +1600,11 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}
- new_size = div_u64(new_size, root->sectorsize);
- new_size *= root->sectorsize;
+ new_size = div_u64(new_size, fs_info->sectorsize);
+ new_size *= fs_info->sectorsize;
- btrfs_info_in_rcu(root->fs_info, "new size for %s is %llu",
- rcu_str_deref(device->name), new_size);
+ btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
+ rcu_str_deref(device->name), new_size);
if (new_size > old_size) {
trans = btrfs_start_transaction(root, 0);
@@ -1612,7 +1613,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}
ret = btrfs_grow_device(trans, device, new_size);
- btrfs_commit_transaction(trans, root);
+ btrfs_commit_transaction(trans);
} else if (new_size < old_size) {
ret = btrfs_shrink_device(device, new_size);
} /* equal, nothing need to do */
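
The divide-then-multiply on new_size above rounds the requested device size down to a whole number of sectors (div_u64() here is just 64-bit division). The same in plain C:

#include <stdio.h>

/* Round n down to a multiple of unit. */
static unsigned long long round_down_to(unsigned long long n,
					unsigned long long unit)
{
	return (n / unit) * unit;
}

int main(void)
{
	printf("%llu\n", round_down_to(10000, 4096)); /* 8192 */
	return 0;
}
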
@@ -1620,8 +1621,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
out_free:
kfree(vol_args);
out:
- mutex_unlock(&root->fs_info->volume_mutex);
- atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+ mutex_unlock(&fs_info->volume_mutex);
+ atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
mnt_drop_write_file(file);
return ret;
}
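
Both this resize path and btrfs_ioctl_add_dev() further down gate themselves with atomic_xchg() on mutually_exclusive_operation_running: swap a 1 in, and if a 1 comes back out, another exclusive device operation is already running. A C11 model of that try-lock, with toy names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int op_running;

/* Acquire the exclusive-operation slot, or fail if it is taken.
 * The kernel returns BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS here. */
static int try_start_excl_op(void)
{
	if (atomic_exchange(&op_running, 1))
		return -1;	/* previous value was 1: busy */
	return 0;
}

static void end_excl_op(void)
{
	atomic_store(&op_running, 0);
}

int main(void)
{
	printf("%d\n", try_start_excl_op());	/*  0: acquired */
	printf("%d\n", try_start_excl_op());	/* -1: busy */
	end_excl_op();
	return 0;
}
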
@@ -1774,6 +1775,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
void __user *arg)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
u64 flags = 0;
@@ -1781,10 +1783,10 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
- down_read(&root->fs_info->subvol_sem);
+ down_read(&fs_info->subvol_sem);
if (btrfs_root_readonly(root))
flags |= BTRFS_SUBVOL_RDONLY;
- up_read(&root->fs_info->subvol_sem);
+ up_read(&fs_info->subvol_sem);
if (copy_to_user(arg, &flags, sizeof(flags)))
ret = -EFAULT;
@@ -1796,6 +1798,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
void __user *arg)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 root_flags;
@@ -1829,7 +1832,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
goto out_drop_write;
}
- down_write(&root->fs_info->subvol_sem);
+ down_write(&fs_info->subvol_sem);
/* nothing to do */
if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
@@ -1851,9 +1854,9 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
spin_unlock(&root->root_item_lock);
} else {
spin_unlock(&root->root_item_lock);
- btrfs_warn(root->fs_info,
- "Attempt to set subvolume %llu read-write during send",
- root->root_key.objectid);
+ btrfs_warn(fs_info,
+ "Attempt to set subvolume %llu read-write during send",
+ root->root_key.objectid);
ret = -EPERM;
goto out_drop_sem;
}
@@ -1865,15 +1868,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
goto out_reset;
}
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
- btrfs_commit_transaction(trans, root);
+ btrfs_commit_transaction(trans);
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
- up_write(&root->fs_info->subvol_sem);
+ up_write(&fs_info->subvol_sem);
out_drop_write:
mnt_drop_write_file(file);
out:
@@ -1885,6 +1888,7 @@ out:
*/
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_dir_item *di;
struct btrfs_key key;
@@ -1896,14 +1900,14 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
return -ENOMEM;
/* Make sure this root isn't set as the default subvol */
- dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
- di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root, path,
+ dir_id = btrfs_super_root_dir(fs_info->super_copy);
+ di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
dir_id, "default", 7, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
if (key.objectid == root->root_key.objectid) {
ret = -EPERM;
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"deleting default subvolume %llu is not allowed",
key.objectid);
goto out;
@@ -1915,8 +1919,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
key.type = BTRFS_ROOT_REF_KEY;
key.offset = (u64)-1;
- ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
- &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
goto out;
BUG_ON(ret == 0);
@@ -2087,10 +2090,10 @@ static noinline int search_ioctl(struct inode *inode,
size_t *buf_size,
char __user *ubuf)
{
+ struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
struct btrfs_root *root;
struct btrfs_key key;
struct btrfs_path *path;
- struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
int ret;
int num_found = 0;
unsigned long sk_offset = 0;
@@ -2353,6 +2356,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
void __user *arg)
{
struct dentry *parent = file->f_path.dentry;
+ struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
struct dentry *dentry;
struct inode *dir = d_inode(parent);
struct inode *inode;
@@ -2418,7 +2422,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* rmdir(2).
*/
err = -EPERM;
- if (!btrfs_test_opt(root->fs_info, USER_SUBVOL_RM_ALLOWED))
+ if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
goto out_dput;
/*
@@ -2462,14 +2466,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
spin_unlock(&dest->root_item_lock);
} else {
spin_unlock(&dest->root_item_lock);
- btrfs_warn(root->fs_info,
- "Attempt to delete subvolume %llu during send",
- dest->root_key.objectid);
+ btrfs_warn(fs_info,
+ "Attempt to delete subvolume %llu during send",
+ dest->root_key.objectid);
err = -EPERM;
goto out_unlock_inode;
}
- down_write(&root->fs_info->subvol_sem);
+ down_write(&fs_info->subvol_sem);
err = may_destroy_subvol(dest);
if (err)
@@ -2514,7 +2518,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
ret = btrfs_insert_orphan_item(trans,
- root->fs_info->tree_root,
+ fs_info->tree_root,
dest->root_key.objectid);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -2523,8 +2527,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
}
}
- ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
- dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
+ ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.uuid,
+ BTRFS_UUID_KEY_SUBVOL,
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
@@ -2532,7 +2536,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out_end_trans;
}
if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
- ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
+ ret = btrfs_uuid_tree_rem(trans, fs_info,
dest->root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
dest->root_key.objectid);
@@ -2546,14 +2550,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
out_end_trans:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
if (ret && !err)
err = ret;
inode->i_flags |= S_DEAD;
out_release:
- btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+ btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
out_up_write:
- up_write(&root->fs_info->subvol_sem);
+ up_write(&fs_info->subvol_sem);
if (err) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
@@ -2655,7 +2659,7 @@ out:
return ret;
}
-static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@@ -2663,12 +2667,10 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
- 1)) {
+ if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1))
return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
- }
- mutex_lock(&root->fs_info->volume_mutex);
+ mutex_lock(&fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
@@ -2676,21 +2678,22 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
- ret = btrfs_init_new_device(root, vol_args->name);
+ ret = btrfs_init_new_device(fs_info, vol_args->name);
if (!ret)
- btrfs_info(root->fs_info, "disk added %s",vol_args->name);
+ btrfs_info(fs_info, "disk added %s", vol_args->name);
kfree(vol_args);
out:
- mutex_unlock(&root->fs_info->volume_mutex);
- atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+ mutex_unlock(&fs_info->volume_mutex);
+ atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
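The device ioctls above share one try-lock idiom: atomic_xchg() on fs_info->mutually_exclusive_operation_running returns the previous value, so a non-zero result means another exclusive operation (device add/remove/replace, balance, resize) already holds the gate and the caller bails out with BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; atomic_set(..., 0) releases it. A minimal userspace sketch of the same idiom using C11 atomics (all names here are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int exclusive_op_running;   /* models the fs_info field */

/* Returns 1 if this caller won the gate, 0 if an operation is already
 * running; atomic_exchange() hands back the previous value. */
static int try_enter_exclusive_op(void)
{
        return atomic_exchange(&exclusive_op_running, 1) == 0;
}

static void leave_exclusive_op(void)
{
        atomic_store(&exclusive_op_running, 0);
}

int main(void)
{
        if (!try_enter_exclusive_op()) {
                puts("busy: another exclusive operation is running");
                return 1;
        }
        puts("gate acquired");
        leave_exclusive_op();
        return 0;
}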
static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
@@ -2711,28 +2714,27 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
return -EOPNOTSUPP;
- if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
- 1)) {
+ if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
goto out;
}
- mutex_lock(&root->fs_info->volume_mutex);
+ mutex_lock(&fs_info->volume_mutex);
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
- ret = btrfs_rm_device(root, NULL, vol_args->devid);
+ ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
} else {
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
- ret = btrfs_rm_device(root, vol_args->name, 0);
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0);
}
- mutex_unlock(&root->fs_info->volume_mutex);
- atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+ mutex_unlock(&fs_info->volume_mutex);
+ atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
if (!ret) {
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
- btrfs_info(root->fs_info, "device deleted: id %llu",
+ btrfs_info(fs_info, "device deleted: id %llu",
vol_args->devid);
else
- btrfs_info(root->fs_info, "device deleted: %s",
+ btrfs_info(fs_info, "device deleted: %s",
vol_args->name);
}
out:
@@ -2744,7 +2746,8 @@ err_drop:
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@@ -2755,8 +2758,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
if (ret)
return ret;
- if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
- 1)) {
+ if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
goto out_drop_write;
}
@@ -2768,26 +2770,27 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
- mutex_lock(&root->fs_info->volume_mutex);
- ret = btrfs_rm_device(root, vol_args->name, 0);
- mutex_unlock(&root->fs_info->volume_mutex);
+ mutex_lock(&fs_info->volume_mutex);
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0);
+ mutex_unlock(&fs_info->volume_mutex);
if (!ret)
- btrfs_info(root->fs_info, "disk deleted %s",vol_args->name);
+ btrfs_info(fs_info, "disk deleted %s", vol_args->name);
kfree(vol_args);
out:
- atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+ atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
out_drop_write:
mnt_drop_write_file(file);
return ret;
}
-static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
struct btrfs_ioctl_fs_info_args *fi_args;
struct btrfs_device *device;
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int ret = 0;
fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
@@ -2796,7 +2799,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
mutex_lock(&fs_devices->device_list_mutex);
fi_args->num_devices = fs_devices->num_devices;
- memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
+ memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->devid > fi_args->max_id)
@@ -2804,9 +2807,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
}
mutex_unlock(&fs_devices->device_list_mutex);
- fi_args->nodesize = root->fs_info->super_copy->nodesize;
- fi_args->sectorsize = root->fs_info->super_copy->sectorsize;
- fi_args->clone_alignment = root->fs_info->super_copy->sectorsize;
+ fi_args->nodesize = fs_info->super_copy->nodesize;
+ fi_args->sectorsize = fs_info->super_copy->sectorsize;
+ fi_args->clone_alignment = fs_info->super_copy->sectorsize;
if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
ret = -EFAULT;
@@ -2815,11 +2818,12 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
return ret;
}
-static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
struct btrfs_ioctl_dev_info_args *di_args;
struct btrfs_device *dev;
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int ret = 0;
char *s_uuid = NULL;
@@ -2831,7 +2835,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
s_uuid = di_args->uuid;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
+ dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
if (!dev) {
ret = -ENODEV;
@@ -3305,10 +3309,10 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
out:
return ret;
}
@@ -3406,9 +3410,10 @@ static int clone_copy_inline_extent(struct inode *src,
const u64 size,
char *inline_data)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
struct btrfs_root *root = BTRFS_I(dst)->root;
const u64 aligned_end = ALIGN(new_key->offset + datal,
- root->sectorsize);
+ fs_info->sectorsize);
int ret;
struct btrfs_key key;
@@ -3529,6 +3534,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
const u64 off, const u64 olen, const u64 olen_aligned,
const u64 destoff, int no_time_update)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path = NULL;
struct extent_buffer *leaf;
@@ -3542,9 +3548,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
u64 last_dest_end = destoff;
ret = -ENOMEM;
- buf = kmalloc(root->nodesize, GFP_KERNEL | __GFP_NOWARN);
+ buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
if (!buf) {
- buf = vmalloc(root->nodesize);
+ buf = vmalloc(fs_info->nodesize);
if (!buf)
return ret;
}
@@ -3707,7 +3713,7 @@ process_slot:
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans,
ret);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
@@ -3715,7 +3721,7 @@ process_slot:
&new_key, size);
if (ret) {
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
@@ -3739,7 +3745,8 @@ process_slot:
if (disko) {
inode_add_bytes(inode, datal);
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans,
+ fs_info,
disko, diskl, 0,
root->root_key.objectid,
btrfs_ino(inode),
@@ -3747,8 +3754,7 @@ process_slot:
if (ret) {
btrfs_abort_transaction(trans,
ret);
- btrfs_end_transaction(trans,
- root);
+ btrfs_end_transaction(trans);
goto out;
}
@@ -3767,7 +3773,7 @@ process_slot:
if (comp && (skip || trim)) {
ret = -EINVAL;
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
size -= skip + trim;
@@ -3783,7 +3789,7 @@ process_slot:
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans,
ret);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
leaf = path->nodes[0];
@@ -3802,7 +3808,7 @@ process_slot:
btrfs_release_path(path);
last_dest_end = ALIGN(new_key.offset + datal,
- root->sectorsize);
+ fs_info->sectorsize);
ret = clone_finish_inode_update(trans, inode,
last_dest_end,
destoff, olen,
@@ -3843,7 +3849,7 @@ process_slot:
if (ret) {
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
clone_update_extent_map(inode, trans, NULL, last_dest_end,
@@ -3863,10 +3869,11 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
{
struct inode *inode = file_inode(file);
struct inode *src = file_inode(file_src);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
u64 len = olen;
- u64 bs = root->fs_info->sb->s_blocksize;
+ u64 bs = fs_info->sb->s_blocksize;
int same_inode = src == inode;
/*
@@ -4007,6 +4014,7 @@ int btrfs_clone_file_range(struct file *src_file, loff_t off,
static long btrfs_ioctl_trans_start(struct file *file)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
@@ -4027,7 +4035,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
if (ret)
goto out;
- atomic_inc(&root->fs_info->open_ioctl_trans);
+ atomic_inc(&fs_info->open_ioctl_trans);
ret = -ENOMEM;
trans = btrfs_start_ioctl_transaction(root);
@@ -4038,7 +4046,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
return 0;
out_drop:
- atomic_dec(&root->fs_info->open_ioctl_trans);
+ atomic_dec(&fs_info->open_ioctl_trans);
mnt_drop_write_file(file);
out:
return ret;
@@ -4047,6 +4055,7 @@ out:
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root *new_root;
struct btrfs_dir_item *di;
@@ -4077,7 +4086,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
location.type = BTRFS_ROOT_ITEM_KEY;
location.offset = (u64)-1;
- new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
+ new_root = btrfs_read_fs_root_no_name(fs_info, &location);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
goto out;
@@ -4097,13 +4106,13 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
goto out;
}
- dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
- di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
+ dir_id = btrfs_super_root_dir(fs_info->super_copy);
+ di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
dir_id, "default", 7, 1);
if (IS_ERR_OR_NULL(di)) {
btrfs_free_path(path);
- btrfs_end_transaction(trans, root);
- btrfs_err(new_root->fs_info,
+ btrfs_end_transaction(trans);
+ btrfs_err(fs_info,
"Umm, you don't have the default diritem, this isn't going to work");
ret = -ENOENT;
goto out;
@@ -4114,8 +4123,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
- btrfs_end_transaction(trans, root);
+ btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
+ btrfs_end_transaction(trans);
out:
mnt_drop_write_file(file);
return ret;
@@ -4137,7 +4146,8 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
}
}
-static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
struct btrfs_ioctl_space_args space_args;
struct btrfs_ioctl_space_info space;
@@ -4165,7 +4175,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
info = NULL;
rcu_read_lock();
- list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+ list_for_each_entry_rcu(tmp, &fs_info->space_info,
list) {
if (tmp->flags == types[i]) {
info = tmp;
@@ -4221,7 +4231,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
info = NULL;
rcu_read_lock();
- list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+ list_for_each_entry_rcu(tmp, &fs_info->space_info,
list) {
if (tmp->flags == types[i]) {
info = tmp;
@@ -4252,7 +4262,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
* Add global block reserve
*/
if (slot_count) {
- struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
+ struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
spin_lock(&block_rsv->lock);
space.total_bytes = block_rsv->size;
@@ -4294,7 +4304,7 @@ long btrfs_ioctl_trans_end(struct file *file)
return -EINVAL;
file->private_data = NULL;
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
atomic_dec(&root->fs_info->open_ioctl_trans);
@@ -4319,9 +4329,9 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
goto out;
}
transid = trans->transid;
- ret = btrfs_commit_transaction_async(trans, root, 0);
+ ret = btrfs_commit_transaction_async(trans, 0);
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
out:
@@ -4331,7 +4341,7 @@ out:
return 0;
}
-static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
+static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
void __user *argp)
{
u64 transid;
@@ -4342,12 +4352,12 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
} else {
transid = 0; /* current trans */
}
- return btrfs_wait_for_commit(root, transid);
+ return btrfs_wait_for_commit(fs_info, transid);
}
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
struct btrfs_ioctl_scrub_args *sa;
int ret;
@@ -4364,7 +4374,7 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
goto out;
}
- ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
+ ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
&sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
0);
@@ -4378,15 +4388,15 @@ out:
return ret;
}
-static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_scrub_cancel(root->fs_info);
+ return btrfs_scrub_cancel(fs_info);
}
-static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
+static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_scrub_args *sa;
@@ -4399,7 +4409,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
if (IS_ERR(sa))
return PTR_ERR(sa);
- ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
+ ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
@@ -4408,7 +4418,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
return ret;
}
-static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
+static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_get_dev_stats *sa;
@@ -4423,7 +4433,7 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
return -EPERM;
}
- ret = btrfs_get_dev_stats(root, sa);
+ ret = btrfs_get_dev_stats(fs_info, sa);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
@@ -4432,7 +4442,8 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
return ret;
}
-static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
struct btrfs_ioctl_dev_replace_args *p;
int ret;
@@ -4446,27 +4457,25 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
switch (p->cmd) {
case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
- if (root->fs_info->sb->s_flags & MS_RDONLY) {
+ if (fs_info->sb->s_flags & MS_RDONLY) {
ret = -EROFS;
goto out;
}
if (atomic_xchg(
- &root->fs_info->mutually_exclusive_operation_running,
- 1)) {
+ &fs_info->mutually_exclusive_operation_running, 1)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
} else {
- ret = btrfs_dev_replace_by_ioctl(root, p);
+ ret = btrfs_dev_replace_by_ioctl(fs_info, p);
atomic_set(
- &root->fs_info->mutually_exclusive_operation_running,
- 0);
+ &fs_info->mutually_exclusive_operation_running, 0);
}
break;
case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
- btrfs_dev_replace_status(root->fs_info, p);
+ btrfs_dev_replace_status(fs_info, p);
ret = 0;
break;
case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
- ret = btrfs_dev_replace_cancel(root->fs_info, p);
+ ret = btrfs_dev_replace_cancel(fs_info, p);
break;
default:
ret = -EINVAL;
@@ -4559,7 +4568,7 @@ static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
return 0;
}
-static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
+static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
void __user *arg)
{
int ret = 0;
@@ -4572,11 +4581,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
return -EPERM;
loi = memdup_user(arg, sizeof(*loi));
- if (IS_ERR(loi)) {
- ret = PTR_ERR(loi);
- loi = NULL;
- goto out;
- }
+ if (IS_ERR(loi))
+ return PTR_ERR(loi);
path = btrfs_alloc_path();
if (!path) {
@@ -4592,7 +4598,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out;
}
- ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
+ ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
build_ino_list, inodes);
if (ret == -EINVAL)
ret = -ENOENT;
@@ -4788,25 +4794,24 @@ out:
return ret;
}
-static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
+static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
case BTRFS_BALANCE_CTL_PAUSE:
- return btrfs_pause_balance(root->fs_info);
+ return btrfs_pause_balance(fs_info);
case BTRFS_BALANCE_CTL_CANCEL:
- return btrfs_cancel_balance(root->fs_info);
+ return btrfs_cancel_balance(fs_info);
}
return -EINVAL;
}
-static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
+static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
int ret = 0;
@@ -4838,7 +4843,8 @@ out:
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_ctl_args *sa;
struct btrfs_trans_handle *trans = NULL;
int ret;
@@ -4857,8 +4863,8 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
goto drop_write;
}
- down_write(&root->fs_info->subvol_sem);
- trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
+ down_write(&fs_info->subvol_sem);
+ trans = btrfs_start_transaction(fs_info->tree_root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
@@ -4866,22 +4872,22 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
- ret = btrfs_quota_enable(trans, root->fs_info);
+ ret = btrfs_quota_enable(trans, fs_info);
break;
case BTRFS_QUOTA_CTL_DISABLE:
- ret = btrfs_quota_disable(trans, root->fs_info);
+ ret = btrfs_quota_disable(trans, fs_info);
break;
default:
ret = -EINVAL;
break;
}
- err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
+ err = btrfs_commit_transaction(trans);
if (err && !ret)
ret = err;
out:
kfree(sa);
- up_write(&root->fs_info->subvol_sem);
+ up_write(&fs_info->subvol_sem);
drop_write:
mnt_drop_write_file(file);
return ret;
@@ -4889,7 +4895,9 @@ drop_write:
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_assign_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -4916,19 +4924,19 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
/* FIXME: check if the IDs really exist */
if (sa->assign) {
- ret = btrfs_add_qgroup_relation(trans, root->fs_info,
+ ret = btrfs_add_qgroup_relation(trans, fs_info,
sa->src, sa->dst);
} else {
- ret = btrfs_del_qgroup_relation(trans, root->fs_info,
+ ret = btrfs_del_qgroup_relation(trans, fs_info,
sa->src, sa->dst);
}
/* update qgroup status and info */
- err = btrfs_run_qgroups(trans, root->fs_info);
+ err = btrfs_run_qgroups(trans, fs_info);
if (err < 0)
- btrfs_handle_fs_error(root->fs_info, err,
- "failed to update qgroup status and info");
- err = btrfs_end_transaction(trans, root);
+ btrfs_handle_fs_error(fs_info, err,
+ "failed to update qgroup status and info");
+ err = btrfs_end_transaction(trans);
if (err && !ret)
ret = err;
@@ -4941,7 +4949,9 @@ drop_write:
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_create_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -4973,12 +4983,12 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
/* FIXME: check if the IDs really exist */
if (sa->create) {
- ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid);
+ ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
} else {
- ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
+ ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
}
- err = btrfs_end_transaction(trans, root);
+ err = btrfs_end_transaction(trans);
if (err && !ret)
ret = err;
@@ -4991,7 +5001,9 @@ drop_write:
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_limit_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -5024,9 +5036,9 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
}
/* FIXME: check if the IDs really exist */
- ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
+ ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
- err = btrfs_end_transaction(trans, root);
+ err = btrfs_end_transaction(trans);
if (err && !ret)
ret = err;
@@ -5039,7 +5051,8 @@ drop_write:
static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_rescan_args *qsa;
int ret;
@@ -5061,7 +5074,7 @@ static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
goto out;
}
- ret = btrfs_qgroup_rescan(root->fs_info);
+ ret = btrfs_qgroup_rescan(fs_info);
out:
kfree(qsa);
@@ -5072,7 +5085,8 @@ drop_write:
static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_rescan_args *qsa;
int ret = 0;
@@ -5083,9 +5097,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
if (!qsa)
return -ENOMEM;
- if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
qsa->flags = 1;
- qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
+ qsa->progress = fs_info->qgroup_rescan_progress.objectid;
}
if (copy_to_user(arg, qsa, sizeof(*qsa)))
@@ -5097,18 +5111,20 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_qgroup_wait_for_completion(root->fs_info, true);
+ return btrfs_qgroup_wait_for_completion(fs_info, true);
}
static long _btrfs_ioctl_set_received_subvol(struct file *file,
struct btrfs_ioctl_received_subvol_args *sa)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root_item *root_item = &root->root_item;
struct btrfs_trans_handle *trans;
@@ -5123,7 +5139,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
if (ret < 0)
return ret;
- down_write(&root->fs_info->subvol_sem);
+ down_write(&fs_info->subvol_sem);
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
ret = -EINVAL;
@@ -5154,8 +5170,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
BTRFS_UUID_SIZE);
if (received_uuid_changed &&
!btrfs_is_empty_uuid(root_item->received_uuid))
- btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
- root_item->received_uuid,
+ btrfs_uuid_tree_rem(trans, fs_info, root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
root->root_key.objectid);
memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
@@ -5166,15 +5181,14 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
if (ret < 0) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
- ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
- sa->uuid,
+ ret = btrfs_uuid_tree_add(trans, fs_info, sa->uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
root->root_key.objectid);
if (ret < 0 && ret != -EEXIST) {
@@ -5182,14 +5196,14 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
goto out;
}
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
goto out;
}
out:
- up_write(&root->fs_info->subvol_sem);
+ up_write(&fs_info->subvol_sem);
mnt_drop_write_file(file);
return ret;
}
@@ -5203,11 +5217,8 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
int ret = 0;
args32 = memdup_user(arg, sizeof(*args32));
- if (IS_ERR(args32)) {
- ret = PTR_ERR(args32);
- args32 = NULL;
- goto out;
- }
+ if (IS_ERR(args32))
+ return PTR_ERR(args32);
args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
if (!args64) {
@@ -5255,11 +5266,8 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
int ret = 0;
sa = memdup_user(arg, sizeof(*sa));
- if (IS_ERR(sa)) {
- ret = PTR_ERR(sa);
- sa = NULL;
- goto out;
- }
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
ret = _btrfs_ioctl_set_received_subvol(file, sa);
@@ -5277,20 +5285,22 @@ out:
static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
size_t len;
int ret;
char label[BTRFS_LABEL_SIZE];
- spin_lock(&root->fs_info->super_lock);
- memcpy(label, root->fs_info->super_copy->label, BTRFS_LABEL_SIZE);
- spin_unlock(&root->fs_info->super_lock);
+ spin_lock(&fs_info->super_lock);
+ memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
+ spin_unlock(&fs_info->super_lock);
len = strnlen(label, BTRFS_LABEL_SIZE);
if (len == BTRFS_LABEL_SIZE) {
- btrfs_warn(root->fs_info,
- "label is too long, return the first %zu bytes", --len);
+ btrfs_warn(fs_info,
+ "label is too long, return the first %zu bytes",
+ --len);
}
ret = copy_to_user(arg, label, len);
@@ -5300,8 +5310,10 @@ static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
- struct btrfs_super_block *super_block = root->fs_info->super_copy;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_trans_handle *trans;
char label[BTRFS_LABEL_SIZE];
int ret;
@@ -5313,7 +5325,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
return -EFAULT;
if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"unable to set label with more than %d bytes",
BTRFS_LABEL_SIZE - 1);
return -EINVAL;
@@ -5329,10 +5341,10 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
goto out_unlock;
}
- spin_lock(&root->fs_info->super_lock);
+ spin_lock(&fs_info->super_lock);
strcpy(super_block->label, label);
- spin_unlock(&root->fs_info->super_lock);
- ret = btrfs_commit_transaction(trans, root);
+ spin_unlock(&fs_info->super_lock);
+ ret = btrfs_commit_transaction(trans);
out_unlock:
mnt_drop_write_file(file);
@@ -5360,8 +5372,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg)
static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
- struct btrfs_super_block *super_block = root->fs_info->super_copy;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_ioctl_feature_flags features;
features.compat_flags = btrfs_super_compat_flags(super_block);
@@ -5374,7 +5387,7 @@ static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
return 0;
}
-static int check_feature_bits(struct btrfs_root *root,
+static int check_feature_bits(struct btrfs_fs_info *fs_info,
enum btrfs_feature_set set,
u64 change_mask, u64 flags, u64 supported_flags,
u64 safe_set, u64 safe_clear)
@@ -5389,14 +5402,14 @@ static int check_feature_bits(struct btrfs_root *root,
if (unsupported) {
names = btrfs_printable_features(set, unsupported);
if (names) {
- btrfs_warn(root->fs_info,
- "this kernel does not support the %s feature bit%s",
- names, strchr(names, ',') ? "s" : "");
+ btrfs_warn(fs_info,
+ "this kernel does not support the %s feature bit%s",
+ names, strchr(names, ',') ? "s" : "");
kfree(names);
} else
- btrfs_warn(root->fs_info,
- "this kernel does not support %s bits 0x%llx",
- type, unsupported);
+ btrfs_warn(fs_info,
+ "this kernel does not support %s bits 0x%llx",
+ type, unsupported);
return -EOPNOTSUPP;
}
@@ -5404,14 +5417,14 @@ static int check_feature_bits(struct btrfs_root *root,
if (disallowed) {
names = btrfs_printable_features(set, disallowed);
if (names) {
- btrfs_warn(root->fs_info,
- "can't set the %s feature bit%s while mounted",
- names, strchr(names, ',') ? "s" : "");
+ btrfs_warn(fs_info,
+ "can't set the %s feature bit%s while mounted",
+ names, strchr(names, ',') ? "s" : "");
kfree(names);
} else
- btrfs_warn(root->fs_info,
- "can't set %s bits 0x%llx while mounted",
- type, disallowed);
+ btrfs_warn(fs_info,
+ "can't set %s bits 0x%llx while mounted",
+ type, disallowed);
return -EPERM;
}
@@ -5419,30 +5432,32 @@ static int check_feature_bits(struct btrfs_root *root,
if (disallowed) {
names = btrfs_printable_features(set, disallowed);
if (names) {
- btrfs_warn(root->fs_info,
- "can't clear the %s feature bit%s while mounted",
- names, strchr(names, ',') ? "s" : "");
+ btrfs_warn(fs_info,
+ "can't clear the %s feature bit%s while mounted",
+ names, strchr(names, ',') ? "s" : "");
kfree(names);
} else
- btrfs_warn(root->fs_info,
- "can't clear %s bits 0x%llx while mounted",
- type, disallowed);
+ btrfs_warn(fs_info,
+ "can't clear %s bits 0x%llx while mounted",
+ type, disallowed);
return -EPERM;
}
return 0;
}
-#define check_feature(root, change_mask, flags, mask_base) \
-check_feature_bits(root, FEAT_##mask_base, change_mask, flags, \
+#define check_feature(fs_info, change_mask, flags, mask_base) \
+check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
BTRFS_FEATURE_ ## mask_base ## _SUPP, \
BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
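For readability, here is a hand expansion of one instance of the macro above; check_feature(fs_info, mask, flags, COMPAT) pastes the COMPAT token into each feature-constant name:

/*
 * check_feature(fs_info, mask, flags, COMPAT) expands to:
 *
 *      check_feature_bits(fs_info, FEAT_COMPAT, mask, flags,
 *                         BTRFS_FEATURE_COMPAT_SUPP,
 *                         BTRFS_FEATURE_COMPAT_SAFE_SET,
 *                         BTRFS_FEATURE_COMPAT_SAFE_CLEAR);
 */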
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
- struct btrfs_super_block *super_block = root->fs_info->super_copy;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_ioctl_feature_flags flags[2];
struct btrfs_trans_handle *trans;
u64 newflags;
@@ -5459,17 +5474,17 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
!flags[0].incompat_flags)
return 0;
- ret = check_feature(root, flags[0].compat_flags,
+ ret = check_feature(fs_info, flags[0].compat_flags,
flags[1].compat_flags, COMPAT);
if (ret)
return ret;
- ret = check_feature(root, flags[0].compat_ro_flags,
+ ret = check_feature(fs_info, flags[0].compat_ro_flags,
flags[1].compat_ro_flags, COMPAT_RO);
if (ret)
return ret;
- ret = check_feature(root, flags[0].incompat_flags,
+ ret = check_feature(fs_info, flags[0].incompat_flags,
flags[1].incompat_flags, INCOMPAT);
if (ret)
return ret;
@@ -5484,7 +5499,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
goto out_drop_write;
}
- spin_lock(&root->fs_info->super_lock);
+ spin_lock(&fs_info->super_lock);
newflags = btrfs_super_compat_flags(super_block);
newflags |= flags[0].compat_flags & flags[1].compat_flags;
newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
@@ -5499,9 +5514,9 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
btrfs_set_super_incompat_flags(super_block, newflags);
- spin_unlock(&root->fs_info->super_lock);
+ spin_unlock(&fs_info->super_lock);
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
out_drop_write:
mnt_drop_write_file(file);
@@ -5511,7 +5526,9 @@ out_drop_write:
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
- struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
void __user *argp = (void __user *)arg;
switch (cmd) {
@@ -5546,15 +5563,15 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_RESIZE:
return btrfs_ioctl_resize(file, argp);
case BTRFS_IOC_ADD_DEV:
- return btrfs_ioctl_add_dev(root, argp);
+ return btrfs_ioctl_add_dev(fs_info, argp);
case BTRFS_IOC_RM_DEV:
return btrfs_ioctl_rm_dev(file, argp);
case BTRFS_IOC_RM_DEV_V2:
return btrfs_ioctl_rm_dev_v2(file, argp);
case BTRFS_IOC_FS_INFO:
- return btrfs_ioctl_fs_info(root, argp);
+ return btrfs_ioctl_fs_info(fs_info, argp);
case BTRFS_IOC_DEV_INFO:
- return btrfs_ioctl_dev_info(root, argp);
+ return btrfs_ioctl_dev_info(fs_info, argp);
case BTRFS_IOC_BALANCE:
return btrfs_ioctl_balance(file, NULL);
case BTRFS_IOC_TRANS_START:
@@ -5570,40 +5587,40 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_INO_PATHS:
return btrfs_ioctl_ino_to_path(root, argp);
case BTRFS_IOC_LOGICAL_INO:
- return btrfs_ioctl_logical_to_ino(root, argp);
+ return btrfs_ioctl_logical_to_ino(fs_info, argp);
case BTRFS_IOC_SPACE_INFO:
- return btrfs_ioctl_space_info(root, argp);
+ return btrfs_ioctl_space_info(fs_info, argp);
case BTRFS_IOC_SYNC: {
int ret;
- ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
+ ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
if (ret)
return ret;
- ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
+ ret = btrfs_sync_fs(inode->i_sb, 1);
/*
* The transaction thread may want to do more work,
* namely it pokes the cleaner kthread that will start
* processing uncleaned subvols.
*/
- wake_up_process(root->fs_info->transaction_kthread);
+ wake_up_process(fs_info->transaction_kthread);
return ret;
}
case BTRFS_IOC_START_SYNC:
return btrfs_ioctl_start_sync(root, argp);
case BTRFS_IOC_WAIT_SYNC:
- return btrfs_ioctl_wait_sync(root, argp);
+ return btrfs_ioctl_wait_sync(fs_info, argp);
case BTRFS_IOC_SCRUB:
return btrfs_ioctl_scrub(file, argp);
case BTRFS_IOC_SCRUB_CANCEL:
- return btrfs_ioctl_scrub_cancel(root, argp);
+ return btrfs_ioctl_scrub_cancel(fs_info);
case BTRFS_IOC_SCRUB_PROGRESS:
- return btrfs_ioctl_scrub_progress(root, argp);
+ return btrfs_ioctl_scrub_progress(fs_info, argp);
case BTRFS_IOC_BALANCE_V2:
return btrfs_ioctl_balance(file, argp);
case BTRFS_IOC_BALANCE_CTL:
- return btrfs_ioctl_balance_ctl(root, arg);
+ return btrfs_ioctl_balance_ctl(fs_info, arg);
case BTRFS_IOC_BALANCE_PROGRESS:
- return btrfs_ioctl_balance_progress(root, argp);
+ return btrfs_ioctl_balance_progress(fs_info, argp);
case BTRFS_IOC_SET_RECEIVED_SUBVOL:
return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
@@ -5613,7 +5630,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_SEND:
return btrfs_ioctl_send(file, argp);
case BTRFS_IOC_GET_DEV_STATS:
- return btrfs_ioctl_get_dev_stats(root, argp);
+ return btrfs_ioctl_get_dev_stats(fs_info, argp);
case BTRFS_IOC_QUOTA_CTL:
return btrfs_ioctl_quota_ctl(file, argp);
case BTRFS_IOC_QGROUP_ASSIGN:
@@ -5629,7 +5646,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
return btrfs_ioctl_quota_rescan_wait(file, argp);
case BTRFS_IOC_DEV_REPLACE:
- return btrfs_ioctl_dev_replace(root, argp);
+ return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_FSLABEL:
return btrfs_ioctl_get_fslabel(file, argp);
case BTRFS_IOC_SET_FSLABEL:
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 48655da0f4ca..45d26980caf9 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -254,25 +254,21 @@ out:
return ret;
}
-static int lzo_decompress_biovec(struct list_head *ws,
+static int lzo_decompress_bio(struct list_head *ws,
struct page **pages_in,
u64 disk_start,
- struct bio_vec *bvec,
- int vcnt,
+ struct bio *orig_bio,
size_t srclen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2;
char *data_in;
unsigned long page_in_index = 0;
- unsigned long page_out_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long buf_offset = 0;
unsigned long bytes;
unsigned long working_bytes;
- unsigned long pg_offset;
-
size_t in_len;
size_t out_len;
unsigned long in_offset;
@@ -292,7 +288,6 @@ static int lzo_decompress_biovec(struct list_head *ws,
in_page_bytes_left = PAGE_SIZE - LZO_LEN;
tot_out = 0;
- pg_offset = 0;
while (tot_in < tot_len) {
in_len = read_compress_length(data_in + in_offset);
@@ -365,16 +360,14 @@ cont:
tot_out += out_len;
ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
- tot_out, disk_start,
- bvec, vcnt,
- &page_out_index, &pg_offset);
+ tot_out, disk_start, orig_bio);
if (ret2 == 0)
break;
}
done:
kunmap(pages_in[page_in_index]);
if (!ret)
- btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
+ zero_fill_bio(orig_bio);
return ret;
}
@@ -438,6 +431,6 @@ const struct btrfs_compress_op btrfs_lzo_compress = {
.alloc_workspace = lzo_alloc_workspace,
.free_workspace = lzo_free_workspace,
.compress_pages = lzo_compress_pages,
- .decompress_biovec = lzo_decompress_biovec,
+ .decompress_bio = lzo_decompress_bio,
.decompress = lzo_decompress,
};
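One note on the tail handling above, hedged since the matching compression.c hunks are elsewhere in this series:

/*
 * zero_fill_bio() zeroes the bio from its current iterator position
 * onward. Since btrfs_decompress_buf2page() now advances that
 * iterator as it copies decompressed bytes out, only the tail the
 * decompressor never reached is cleared -- the job the removed
 * btrfs_clear_biovec_end() did with hand-rolled page/offset state.
 */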
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b2d1e95de7be..041c3326d109 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -186,6 +186,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
int type, int dio, int compress_type)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
@@ -234,11 +235,10 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
&root->ordered_extents);
root->nr_ordered_extents++;
if (root->nr_ordered_extents == 1) {
- spin_lock(&root->fs_info->ordered_root_lock);
+ spin_lock(&fs_info->ordered_root_lock);
BUG_ON(!list_empty(&root->ordered_root));
- list_add_tail(&root->ordered_root,
- &root->fs_info->ordered_roots);
- spin_unlock(&root->fs_info->ordered_root_lock);
+ list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
+ spin_unlock(&fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);
@@ -303,6 +303,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached,
u64 *file_offset, u64 io_size, int uptodate)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
@@ -331,14 +332,14 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
entry->len);
*file_offset = dec_end;
if (dec_start > dec_end) {
- btrfs_crit(BTRFS_I(inode)->root->fs_info,
- "bad ordering dec_start %llu end %llu", dec_start, dec_end);
+ btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
+ dec_start, dec_end);
}
to_dec = dec_end - dec_start;
if (to_dec > entry->bytes_left) {
- btrfs_crit(BTRFS_I(inode)->root->fs_info,
- "bad ordered accounting left %llu size %llu",
- entry->bytes_left, to_dec);
+ btrfs_crit(fs_info,
+ "bad ordered accounting left %llu size %llu",
+ entry->bytes_left, to_dec);
}
entry->bytes_left -= to_dec;
if (!uptodate)
@@ -588,6 +589,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
void btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct rb_node *node;
@@ -618,11 +620,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
* lock, so be nice and check if trans is set, but ASSERT() so
* if it isn't set a developer will notice.
*/
- spin_lock(&root->fs_info->trans_lock);
- trans = root->fs_info->running_transaction;
+ spin_lock(&fs_info->trans_lock);
+ trans = fs_info->running_transaction;
if (trans)
atomic_inc(&trans->use_count);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
ASSERT(trans);
if (trans) {
@@ -639,10 +641,10 @@ void btrfs_remove_ordered_extent(struct inode *inode,
trace_btrfs_ordered_extent_remove(inode, entry);
if (!root->nr_ordered_extents) {
- spin_lock(&root->fs_info->ordered_root_lock);
+ spin_lock(&fs_info->ordered_root_lock);
BUG_ON(list_empty(&root->ordered_root));
list_del_init(&root->ordered_root);
- spin_unlock(&root->fs_info->ordered_root_lock);
+ spin_unlock(&fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);
wake_up(&entry->wait);
@@ -664,6 +666,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
const u64 range_start, const u64 range_len)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(splice);
LIST_HEAD(skipped);
LIST_HEAD(works);
@@ -694,8 +697,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
btrfs_flush_delalloc_helper,
btrfs_run_ordered_extent_work, NULL, NULL);
list_add_tail(&ordered->work_list, &works);
- btrfs_queue_work(root->fs_info->flush_workers,
- &ordered->flush_work);
+ btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
cond_resched();
spin_lock(&root->ordered_extent_lock);
@@ -978,7 +980,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
ordered->file_offset +
ordered->truncated_len);
} else {
- offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
+ offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
}
disk_i_size = BTRFS_I(inode)->disk_i_size;
@@ -1087,7 +1089,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
unsigned long num_sectors;
unsigned long i;
- u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
+ u32 sectorsize = btrfs_inode_sectorsize(inode);
int index = 0;
ordered = btrfs_lookup_ordered_extent(inode, offset);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 451507776ff5..5f2b0ca28705 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -145,10 +145,10 @@ struct btrfs_ordered_extent {
* calculates the total size you need to allocate for an ordered sum
* structure spanning 'bytes' in the file
*/
-static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
+static inline int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info,
unsigned long bytes)
{
- int num_sectors = (int)DIV_ROUND_UP(bytes, root->sectorsize);
+ int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
return sizeof(struct btrfs_ordered_sum) + num_sectors * sizeof(u32);
}
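With the sectorsize now read from fs_info, the arithmetic is unchanged: one u32 checksum slot per sector, rounded up. As a worked example, a 16 KiB range with 4 KiB sectors needs DIV_ROUND_UP(16384, 4096) = 4 slots, i.e. the struct header plus 16 bytes of checksums. A stand-alone sketch with a stub struct (the real btrfs_ordered_sum layout differs; this only mirrors the size computation):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct ordered_sum_stub {       /* stand-in for btrfs_ordered_sum */
        uint64_t bytenr;
        int len;
};

int main(void)
{
        unsigned long bytes = 16384, sectorsize = 4096;
        int num_sectors = (int)DIV_ROUND_UP(bytes, sectorsize);
        size_t size = sizeof(struct ordered_sum_stub) +
                      num_sectors * sizeof(uint32_t);

        /* 4 sectors -> header + 16 bytes of crc32c checksums */
        printf("%d sectors, %zu bytes\n", num_sectors, size);
        return 0;
}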
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 438575ea8d25..cdafbf92ef0c 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -161,7 +161,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
}
}
-void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
+void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
{
int i;
u32 type, nr;
@@ -182,8 +182,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
nr = btrfs_header_nritems(l);
- btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d",
- btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
+ btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
+ btrfs_header_bytenr(l), nr,
+ btrfs_leaf_free_space(fs_info, l));
for (i = 0 ; i < nr ; i++) {
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
@@ -314,7 +315,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
}
}
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
+void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c)
{
int i; u32 nr;
struct btrfs_key key;
@@ -325,13 +326,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
nr = btrfs_header_nritems(c);
level = btrfs_header_level(c);
if (level == 0) {
- btrfs_print_leaf(root, c);
+ btrfs_print_leaf(fs_info, c);
return;
}
- btrfs_info(root->fs_info,
+ btrfs_info(fs_info,
"node %llu level %d total ptrs %d free spc %u",
btrfs_header_bytenr(c), level, nr,
- (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+ (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
pr_info("\tkey %d (%llu %u %llu) block %llu\n",
@@ -339,7 +340,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_node_blockptr(c, i));
}
for (i = 0; i < nr; i++) {
- struct extent_buffer *next = read_tree_block(root,
+ struct extent_buffer *next = read_tree_block(fs_info,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
if (IS_ERR(next)) {
@@ -355,7 +356,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
if (btrfs_header_level(next) !=
level - 1)
BUG();
- btrfs_print_tree(root, next);
+ btrfs_print_tree(fs_info, next);
free_extent_buffer(next);
}
}
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index 7faddfacc5bd..4f2e0ea0e95a 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -18,6 +18,6 @@
#ifndef __PRINT_TREE_
#define __PRINT_TREE_
-void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
+void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l);
+void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c);
#endif
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index cf0b444ac4f3..f2621e330954 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -301,6 +301,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
struct inode *parent)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int i;
@@ -320,14 +321,14 @@ static int inherit_props(struct btrfs_trans_handle *trans,
if (!value)
continue;
- num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
num_bytes, BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
ret = __btrfs_set_prop(trans, inode, h->xattr_name,
value, strlen(value), 0);
- btrfs_block_rsv_release(root, trans->block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
if (ret)
goto out;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 11f4fffe503e..662821f1252c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -131,8 +131,15 @@ struct btrfs_qgroup_list {
struct btrfs_qgroup *member;
};
-#define ptr_to_u64(x) ((u64)(uintptr_t)x)
-#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
+static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
+{
+ return (u64)(uintptr_t)qg;
+}
+
+static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
+{
+ return (struct btrfs_qgroup *)(uintptr_t)n->aux;
+}
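The removed ptr_to_u64()/u64_to_ptr() macros performed the same casts with no type checking; the inline replacements reject a wrong pointer type at compile time. A self-contained userspace model of the aux round trip (stub types standing in for the kernel's ulist_node and btrfs_qgroup):

#include <stdint.h>
#include <stdio.h>

struct ulist_node { uint64_t aux; };
struct btrfs_qgroup { uint64_t qgroupid; };

static inline uint64_t qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (uint64_t)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
        return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

int main(void)
{
        struct btrfs_qgroup qg = { .qgroupid = 5 };
        struct ulist_node node = { .aux = qgroup_to_aux(&qg) };

        /* the pointer survives the u64 round trip unchanged */
        printf("qgroupid %llu\n",
               (unsigned long long)unode_aux_to_qgroup(&node)->qgroupid);
        return 0;
}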
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
@@ -1012,7 +1019,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
list_del(&quota_root->dirty_list);
btrfs_tree_lock(quota_root->node);
- clean_tree_block(trans, tree_root->fs_info, quota_root->node);
+ clean_tree_block(trans, fs_info, quota_root->node);
btrfs_tree_unlock(quota_root->node);
btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
@@ -1066,7 +1073,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
/* Get all of the parent groups that contain this qgroup */
list_for_each_entry(glist, &qgroup->groups, next_group) {
ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group), GFP_ATOMIC);
+ qgroup_to_aux(glist->group), GFP_ATOMIC);
if (ret < 0)
goto out;
}
@@ -1074,7 +1081,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
/* Iterate all of the parents and adjust their reference counts */
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(tmp, &uiter))) {
- qgroup = u64_to_ptr(unode->aux);
+ qgroup = unode_aux_to_qgroup(unode);
qgroup->rfer += sign * num_bytes;
qgroup->rfer_cmpr += sign * num_bytes;
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
@@ -1087,7 +1094,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
/* Add any parents of the parents */
list_for_each_entry(glist, &qgroup->groups, next_group) {
ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group), GFP_ATOMIC);
+ qgroup_to_aux(glist->group), GFP_ATOMIC);
if (ret < 0)
goto out;
}
@@ -1185,7 +1192,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
}
spin_lock(&fs_info->qgroup_lock);
- ret = add_relation_rb(quota_root->fs_info, src, dst);
+ ret = add_relation_rb(fs_info, src, dst);
if (ret < 0) {
spin_unlock(&fs_info->qgroup_lock);
goto out;
@@ -1333,7 +1340,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
}
spin_lock(&fs_info->qgroup_lock);
- del_qgroup_rb(quota_root->fs_info, qgroupid);
+ del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
@@ -1450,7 +1457,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
return ret;
}
-int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record)
{
@@ -1460,7 +1467,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
u64 bytenr = record->bytenr;
assert_spin_locked(&delayed_refs->lock);
- trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
+ trace_btrfs_qgroup_trace_extent(fs_info, record);
while (*p) {
parent_node = *p;
@@ -1479,7 +1486,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
gfp_t gfp_flag)
{
@@ -1502,14 +1509,228 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
record->old_roots = NULL;
spin_lock(&delayed_refs->lock);
- ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
- record);
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);
if (ret > 0)
kfree(record);
return 0;
}
+int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb)
+{
+ int nr = btrfs_header_nritems(eb);
+ int i, extent_type, ret;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *fi;
+ u64 bytenr, num_bytes;
+
+ /* We can be called directly from walk_up_proc() */
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ for (i = 0; i < nr; i++) {
+ btrfs_item_key_to_cpu(eb, &key, i);
+
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+ /* filter out non-qgroup-accountable extents */
+ extent_type = btrfs_file_extent_type(eb, fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ continue;
+
+ bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (!bytenr)
+ continue;
+
+ num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
+
+ ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
+ num_bytes, GFP_NOFS);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
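(Illustrative only, not part of the patch: the loop above skips three kinds of items that need no data-extent tracing: non-EXTENT_DATA keys, inline extents, and holes with a zero disk bytenr. A hedged standalone restatement of that filter; the constant values are assumptions matching the on-disk format as far as I know:)

	#include <stdbool.h>
	#include <stdint.h>

	#define BTRFS_EXTENT_DATA_KEY		108	/* file extent item key type */
	#define BTRFS_FILE_EXTENT_INLINE	0	/* data stored inside the leaf */

	/* mirrors the continue conditions in btrfs_qgroup_trace_leaf_items() */
	static bool extent_needs_qgroup_trace(uint8_t key_type, int extent_type,
					      uint64_t disk_bytenr)
	{
		if (key_type != BTRFS_EXTENT_DATA_KEY)
			return false;		/* not a file extent item at all */
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			return false;		/* inline data, no separate extent */
		if (disk_bytenr == 0)
			return false;		/* hole, nothing allocated on disk */
		return true;
	}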
+
+/*
+ * Walk up the tree from the bottom, freeing leaves and any interior
+ * nodes which have had all slots visited. If a node (leaf or
+ * interior) is freed, the node above it will have its slot
+ * incremented. The root node will never be freed.
+ *
+ * At the end of this function, we should have a path which has all
+ * slots incremented to the next position for a search. If we need to
+ * read a new node it will be NULL and the node above it will have the
+ * correct slot selected for a later read.
+ *
+ * If we increment the root node's slot counter past the number of
+ * elements, 1 is returned to signal completion of the search.
+ */
+static int adjust_slots_upwards(struct btrfs_root *root,
+ struct btrfs_path *path, int root_level)
+{
+ int level = 0;
+ int nr, slot;
+ struct extent_buffer *eb;
+
+ if (root_level == 0)
+ return 1;
+
+ while (level <= root_level) {
+ eb = path->nodes[level];
+ nr = btrfs_header_nritems(eb);
+ path->slots[level]++;
+ slot = path->slots[level];
+ if (slot >= nr || level == 0) {
+ /*
+ * Don't free the root - we will detect this
+ * condition after our loop and return a
+ * positive value for the caller to stop walking the tree.
+ */
+ if (level != root_level) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ path->locks[level] = 0;
+
+ free_extent_buffer(eb);
+ path->nodes[level] = NULL;
+ path->slots[level] = 0;
+ }
+ } else {
+ /*
+ * We have a valid slot to walk back down
+ * from. Stop here so caller can process these
+ * new nodes.
+ */
+ break;
+ }
+
+ level++;
+ }
+
+ eb = path->nodes[root_level];
+ if (path->slots[root_level] >= btrfs_header_nritems(eb))
+ return 1;
+
+ return 0;
+}
+
+int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *root_eb,
+ u64 root_gen, int root_level)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
+ int level;
+ struct extent_buffer *eb = root_eb;
+ struct btrfs_path *path = NULL;
+
+ BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
+ BUG_ON(root_eb == NULL);
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ if (!extent_buffer_uptodate(root_eb)) {
+ ret = btrfs_read_buffer(root_eb, root_gen);
+ if (ret)
+ goto out;
+ }
+
+ if (root_level == 0) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /*
+ * Walk down the tree. Missing extent blocks are filled in as
+ * we go. Metadata is accounted every time we read a new
+ * extent block.
+ *
+ * When we reach a leaf, we account for file extent items in it,
+ * walk back up the tree (adjusting slot pointers as we go)
+ * and restart the search process.
+ */
+ extent_buffer_get(root_eb); /* For path */
+ path->nodes[root_level] = root_eb;
+ path->slots[root_level] = 0;
+ path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
+walk_down:
+ level = root_level;
+ while (level >= 0) {
+ if (path->nodes[level] == NULL) {
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ /*
+ * We need to get child blockptr/gen from parent before
+ * we can read it.
+ */
+ eb = path->nodes[level + 1];
+ parent_slot = path->slots[level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+
+ eb = read_tree_block(fs_info, child_bytenr, child_gen);
+ if (IS_ERR(eb)) {
+ ret = PTR_ERR(eb);
+ goto out;
+ } else if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ ret = -EIO;
+ goto out;
+ }
+
+ path->nodes[level] = eb;
+ path->slots[level] = 0;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+ ret = btrfs_qgroup_trace_extent(trans, fs_info,
+ child_bytenr,
+ fs_info->nodesize,
+ GFP_NOFS);
+ if (ret)
+ goto out;
+ }
+
+ if (level == 0) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
+ path->nodes[level]);
+ if (ret)
+ goto out;
+
+ /* Nonzero return here means we completed our search */
+ ret = adjust_slots_upwards(root, path, root_level);
+ if (ret)
+ break;
+
+ /* Restart search with new slots */
+ goto walk_down;
+ }
+
+ level--;
+ }
+
+ ret = 0;
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
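(A hedged sketch of the expected call pattern, not part of the patch. The relocation.c hunk later in this series invokes it the same way for each subtree being swapped; 'trans', 'root' and 'eb' are placeholders assumed to come from the caller's context:)

	/* trace every block and file extent under 'eb' before ownership changes */
	ret = btrfs_qgroup_trace_subtree(trans, root, eb,
					 btrfs_header_generation(eb),
					 btrfs_header_level(eb));
	if (ret < 0)
		goto out;	/* qgroup numbers would go stale otherwise */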
+
#define UPDATE_NEW 0
#define UPDATE_OLD 1
/*
@@ -1535,30 +1756,30 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
continue;
ulist_reinit(tmp);
- ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
+ ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
GFP_ATOMIC);
if (ret < 0)
return ret;
- ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
+ ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
if (ret < 0)
return ret;
ULIST_ITER_INIT(&tmp_uiter);
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
struct btrfs_qgroup_list *glist;
- qg = u64_to_ptr(tmp_unode->aux);
+ qg = unode_aux_to_qgroup(tmp_unode);
if (update_old)
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
else
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
list_for_each_entry(glist, &qg->groups, next_group) {
ret = ulist_add(qgroups, glist->group->qgroupid,
- ptr_to_u64(glist->group),
+ qgroup_to_aux(glist->group),
GFP_ATOMIC);
if (ret < 0)
return ret;
ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group),
+ qgroup_to_aux(glist->group),
GFP_ATOMIC);
if (ret < 0)
return ret;
@@ -1619,7 +1840,7 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
while ((unode = ulist_next(qgroups, &uiter))) {
bool dirty = false;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
@@ -1950,7 +2171,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
}
rcu_read_lock();
- level_size = srcroot->nodesize;
+ level_size = fs_info->nodesize;
rcu_read_unlock();
}
@@ -2034,8 +2255,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) {
if (*i_qgroups) {
- ret = add_relation_rb(quota_root->fs_info, objectid,
- *i_qgroups);
+ ret = add_relation_rb(fs_info, objectid, *i_qgroups);
if (ret)
goto unlock;
}
@@ -2125,7 +2345,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
qg->reserved + (s64)qg->rfer + num_bytes >
@@ -2157,7 +2377,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
struct btrfs_qgroup *qg;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
qg->reserved += num_bytes;
}
@@ -2202,7 +2422,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
qg->reserved -= num_bytes;
@@ -2302,7 +2522,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
found.type != BTRFS_METADATA_ITEM_KEY)
continue;
if (found.type == BTRFS_METADATA_ITEM_KEY)
- num_bytes = fs_info->extent_root->nodesize;
+ num_bytes = fs_info->nodesize;
else
num_bytes = found.offset;
@@ -2335,10 +2555,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
int err = -ENOMEM;
int ret = 0;
- mutex_lock(&fs_info->qgroup_rescan_lock);
- fs_info->qgroup_rescan_running = true;
- mutex_unlock(&fs_info->qgroup_rescan_lock);
-
path = btrfs_alloc_path();
if (!path)
goto out;
@@ -2356,9 +2572,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
err = qgroup_rescan_leaf(fs_info, path, trans);
}
if (err > 0)
- btrfs_commit_transaction(trans, fs_info->fs_root);
+ btrfs_commit_transaction(trans);
else
- btrfs_end_transaction(trans, fs_info->fs_root);
+ btrfs_end_transaction(trans);
}
out:
@@ -2393,7 +2609,7 @@ out:
err = ret;
btrfs_err(fs_info, "fail to update qgroup status: %d", err);
}
- btrfs_end_transaction(trans, fs_info->quota_root);
+ btrfs_end_transaction(trans);
if (btrfs_fs_closing(fs_info)) {
btrfs_info(fs_info, "qgroup scan paused");
@@ -2449,6 +2665,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
sizeof(fs_info->qgroup_rescan_progress));
fs_info->qgroup_rescan_progress.objectid = progress_objectid;
init_completion(&fs_info->qgroup_rescan_completion);
+ fs_info->qgroup_rescan_running = true;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -2512,7 +2729,7 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return PTR_ERR(trans);
}
- ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+ ret = btrfs_commit_transaction(trans);
if (ret) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return ret;
@@ -2677,13 +2894,14 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid) || num_bytes == 0)
return 0;
- BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
+ BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
ret = qgroup_reserve(root, num_bytes);
if (ret < 0)
return ret;
@@ -2693,9 +2911,10 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int reserved;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
@@ -2707,11 +2926,13 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
- BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
+ BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
atomic_sub(num_bytes, &root->qgroup_meta_rsv);
qgroup_free(root, num_bytes);
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 1bc64c864b62..416ae8e1d23c 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -23,6 +23,34 @@
#include "delayed-ref.h"
/*
+ * Btrfs qgroup overview
+ *
+ * Btrfs qgroup splits into 3 main parts:
+ * 1) Reserve
+ * Reserve metadata/data space for incoming operations
+ * Affects how the qgroup limit works
+ *
+ * 2) Trace
+ * Tell btrfs qgroup to trace dirty extents.
+ *
+ * Dirty extents include:
+ * - Newly allocated extents
+ * - Extents going to be deleted (in this trans)
+ * - Extents whose owner is going to be modified
+ *
+ * This is the main part that affects whether qgroup numbers will
+ * stay consistent.
+ * Btrfs qgroup can trace clean extents without causing any problem,
+ * but it consumes extra CPU time, so it should be avoided if possible.
+ *
+ * 3) Account
+ * Btrfs qgroup updates its numbers based on the dirty extents
+ * traced in the previous step.
+ *
+ * This normally happens at qgroup rescan and transaction commit time.
+ */
+
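(Illustrative lifecycle, not part of the patch, using qgroup APIs named elsewhere in this file; 'root', 'trans', 'fs_info', 'bytenr', 'num_bytes' and 'nr_bytes' are placeholders and error handling is elided:)

	/* 1) Reserve: claim quota before dirtying metadata/data */
	ret = btrfs_qgroup_reserve_meta(root, nr_bytes);

	/* 2) Trace: record each extent dirtied in this transaction */
	ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr, num_bytes,
					GFP_NOFS);

	/* 3) Account: settled later, at rescan or transaction commit,
	 *    by btrfs_qgroup_account_extent() over the traced records */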
+/*
* Record a dirty extent, and inform qgroup to update quota on it
* TODO: Use kmem cache to alloc it.
*/
@@ -65,8 +93,8 @@ struct btrfs_delayed_extent_op;
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
/*
- * Insert one dirty extent record into @delayed_refs, informing qgroup to
- * account that extent at commit trans time.
+ * Inform qgroup to trace one dirty extent, whose info is recorded in
+ * @record, so qgroup can account it at transaction commit time.
*
* No lock version, caller must acquire delayed ref lock and allocate memory.
*
@@ -74,14 +102,15 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
* Return >0 for existing record, caller can free @record safely.
* Error is not possible
*/
-int btrfs_qgroup_insert_dirty_extent_nolock(
+int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record);
/*
- * Insert one dirty extent record into @delayed_refs, informing qgroup to
- * account that extent at commit trans time.
+ * Inform qgroup to trace one dirty extent, specified by @bytenr and
+ * @num_bytes, so qgroup can account it at transaction commit time.
*
* Better encapsulated version.
*
@@ -89,10 +118,33 @@ int btrfs_qgroup_insert_dirty_extent_nolock(
* Return <0 for error, like memory allocation failure or invalid parameter
* (NULL trans)
*/
-int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
gfp_t gfp_flag);
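(Hedged restatement, not part of the patch, of the contract above; it mirrors what btrfs_qgroup_trace_extent() itself does with the _nolock variant earlier in this diff. A fragment sketch, with 'bytenr', 'num_bytes', 'gfp_flag', 'fs_info' and 'delayed_refs' assumed from the caller:)

	struct btrfs_qgroup_extent_record *record;
	int ret;

	record = kmalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);		/* caller must hold this */
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0)				/* bytenr already recorded */
		kfree(record);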
+/*
+ * Inform qgroup to trace all file extent (data) items in the given leaf
+ *
+ * Return 0 for success
+ * Return <0 for error (ENOMEM)
+ */
+int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb);
+/*
+ * Inform qgroup to trace a whole subtree, including all its child tree
+ * blocks and data.
+ * The root tree block is specified by @root_eb.
+ *
+ * Normally used by relocation (tree block swap) and subvolume deletion.
+ *
+ * Return 0 for success
+ * Return <0 for error (ENOMEM or tree search error)
+ */
+int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *root_eb,
+ u64 root_gen, int root_level);
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d016d4a79864..d2a9a1ee5361 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -969,8 +969,9 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* allocation and initial setup for the btrfs_raid_bio. Note that
* this does not allocate any pages for rbio->pages.
*/
-static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
- struct btrfs_bio *bbio, u64 stripe_len)
+static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
+ struct btrfs_bio *bbio,
+ u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
@@ -991,7 +992,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
- rbio->fs_info = root->fs_info;
+ rbio->fs_info = fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
rbio->real_stripes = real_stripes;
@@ -1144,10 +1145,10 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
struct bio *bio;
+ struct bio_vec *bvec;
u64 start;
unsigned long stripe_offset;
unsigned long page_index;
- struct page *p;
int i;
spin_lock_irq(&rbio->bio_list_lock);
@@ -1156,10 +1157,8 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_SHIFT;
- for (i = 0; i < bio->bi_vcnt; i++) {
- p = bio->bi_io_vec[i].bv_page;
- rbio->bio_pages[page_index + i] = p;
- }
+ bio_for_each_segment_all(bvec, bio, i)
+ rbio->bio_pages[page_index + i] = bvec->bv_page;
}
spin_unlock_irq(&rbio->bio_list_lock);
}
@@ -1433,13 +1432,11 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
*/
static void set_bio_pages_uptodate(struct bio *bio)
{
+ struct bio_vec *bvec;
int i;
- struct page *p;
- for (i = 0; i < bio->bi_vcnt; i++) {
- p = bio->bi_io_vec[i].bv_page;
- SetPageUptodate(p);
- }
+ bio_for_each_segment_all(bvec, bio, i)
+ SetPageUptodate(bvec->bv_page);
}
/*
@@ -1482,11 +1479,8 @@ cleanup:
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper,
- rmw_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers,
- &rbio->work);
+ btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
+ btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
static void async_read_rebuild(struct btrfs_raid_bio *rbio)
@@ -1494,8 +1488,7 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
btrfs_init_work(&rbio->work, btrfs_rmw_helper,
read_rebuild_work, NULL, NULL);
- btrfs_queue_work(rbio->fs_info->rmw_workers,
- &rbio->work);
+ btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
@@ -1577,8 +1570,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid_rmw_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- btrfs_bio_wq_end_io(rbio->fs_info, bio,
- BTRFS_WQ_ENDIO_RAID56);
+ btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
@@ -1743,7 +1735,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
/*
* our main entry point for writes from the rest of the FS.
*/
-int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
@@ -1751,7 +1743,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct blk_plug_cb *cb;
int ret;
- rbio = alloc_rbio(root, bbio, stripe_len);
+ rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
@@ -1760,7 +1752,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->operation = BTRFS_RBIO_WRITE;
- btrfs_bio_counter_inc_noblocked(root->fs_info);
+ btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
/*
@@ -1770,16 +1762,15 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
if (rbio_is_full(rbio)) {
ret = full_stripe_write(rbio);
if (ret)
- btrfs_bio_counter_dec(root->fs_info);
+ btrfs_bio_counter_dec(fs_info);
return ret;
}
- cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
- sizeof(*plug));
+ cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
if (cb) {
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (!plug->info) {
- plug->info = root->fs_info;
+ plug->info = fs_info;
INIT_LIST_HEAD(&plug->rbio_list);
}
list_add_tail(&rbio->plug_list, &plug->rbio_list);
@@ -1787,7 +1778,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
} else {
ret = __raid56_parity_write(rbio);
if (ret)
- btrfs_bio_counter_dec(root->fs_info);
+ btrfs_bio_counter_dec(fs_info);
}
return ret;
}
@@ -2102,8 +2093,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid_recover_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- btrfs_bio_wq_end_io(rbio->fs_info, bio,
- BTRFS_WQ_ENDIO_RAID56);
+ btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
@@ -2123,14 +2113,14 @@ cleanup:
* so we assume the bio they send down corresponds to a failed part
* of the drive.
*/
-int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io)
{
struct btrfs_raid_bio *rbio;
int ret;
- rbio = alloc_rbio(root, bbio, stripe_len);
+ rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
btrfs_put_bbio(bbio);
@@ -2143,7 +2133,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
- btrfs_warn(root->fs_info,
+ btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
__func__, (u64)bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type);
@@ -2154,7 +2144,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
}
if (generic_io) {
- btrfs_bio_counter_inc_noblocked(root->fs_info);
+ btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
} else {
btrfs_get_bbio(bbio);
@@ -2212,7 +2202,7 @@ static void read_rebuild_work(struct btrfs_work *work)
*/
struct btrfs_raid_bio *
-raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
@@ -2220,7 +2210,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_raid_bio *rbio;
int i;
- rbio = alloc_rbio(root, bbio, stripe_len);
+ rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
@@ -2239,7 +2229,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
}
/* Now we just support the sectorsize equals to page size */
- ASSERT(root->sectorsize == PAGE_SIZE);
+ ASSERT(fs_info->sectorsize == PAGE_SIZE);
ASSERT(rbio->stripe_npages == stripe_nsectors);
bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
@@ -2621,8 +2611,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid56_parity_scrub_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- btrfs_bio_wq_end_io(rbio->fs_info, bio,
- BTRFS_WQ_ENDIO_RAID56);
+ btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
@@ -2650,8 +2639,7 @@ static void async_scrub_parity(struct btrfs_raid_bio *rbio)
btrfs_init_work(&rbio->work, btrfs_rmw_helper,
scrub_parity_work, NULL, NULL);
- btrfs_queue_work(rbio->fs_info->rmw_workers,
- &rbio->work);
+ btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
@@ -2663,12 +2651,12 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length)
{
struct btrfs_raid_bio *rbio;
- rbio = alloc_rbio(root, bbio, length);
+ rbio = alloc_rbio(fs_info, bbio, length);
if (IS_ERR(rbio))
return NULL;
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 8b694699d502..4ee4fe346838 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -42,24 +42,24 @@ static inline int nr_data_stripes(struct map_lookup *map)
struct btrfs_raid_bio;
struct btrfs_device;
-int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io);
-int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len);
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
u64 logical);
struct btrfs_raid_bio *
-raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 75bab76739be..e88bca87f5d2 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -107,18 +107,14 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
struct reada_extent *re, struct extent_buffer *eb,
- u64 start, int err)
+ int err)
{
- int level = 0;
int nritems;
int i;
u64 bytenr;
u64 generation;
struct list_head list;
- if (eb)
- level = btrfs_header_level(eb);
-
spin_lock(&re->lock);
/*
* just take the full list from the extent. afterwards we
@@ -143,7 +139,7 @@ static void __readahead_hook(struct btrfs_fs_info *fs_info,
* trigger more readahead depending on the content, e.g.
* fetch the checksums for the extents in the leaf.
*/
- if (!level)
+ if (!btrfs_header_level(eb))
goto cleanup;
nritems = btrfs_header_nritems(eb);
@@ -213,12 +209,8 @@ cleanup:
return;
}
-/*
- * start is passed separately in case eb in NULL, which may be the case with
- * failed I/O
- */
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb, u64 start, int err)
+ struct extent_buffer *eb, int err)
{
int ret = 0;
struct reada_extent *re;
@@ -226,7 +218,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
/* find extent */
spin_lock(&fs_info->reada_lock);
re = radix_tree_lookup(&fs_info->reada_tree,
- start >> PAGE_SHIFT);
+ eb->start >> PAGE_SHIFT);
if (re)
re->refcnt++;
spin_unlock(&fs_info->reada_lock);
@@ -235,7 +227,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
goto start_machine;
}
- __readahead_hook(fs_info, re, eb, start, err);
+ __readahead_hook(fs_info, re, eb, err);
reada_extent_put(fs_info, re); /* our ref */
start_machine:
@@ -311,14 +303,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
return zone;
}
-static struct reada_extent *reada_find_extent(struct btrfs_root *root,
+static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
u64 logical,
struct btrfs_key *top)
{
int ret;
struct reada_extent *re = NULL;
struct reada_extent *re_exist = NULL;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_bio *bbio = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
@@ -343,7 +334,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
if (!re)
return NULL;
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
re->logical = logical;
re->top = *top;
INIT_LIST_HEAD(&re->extctl);
@@ -354,13 +345,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
* map block
*/
length = blocksize;
- ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
- &bbio, 0);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
+ &length, &bbio, 0);
if (ret || !bbio || length < blocksize)
goto error;
if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"readahead: more than %d copies not supported",
BTRFS_MAX_MIRRORS);
goto error;
@@ -401,7 +392,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
ret = radix_tree_insert(&fs_info->reada_tree, index, re);
if (ret == -EEXIST) {
re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
- BUG_ON(!re_exist);
re_exist->refcnt++;
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -448,7 +438,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
/* ignore whether the entry was inserted */
radix_tree_delete(&dev->reada_extents, index);
}
- BUG_ON(fs_info == NULL);
radix_tree_delete(&fs_info->reada_tree, index);
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -554,17 +543,18 @@ static void reada_control_release(struct kref *kref)
static int reada_add_block(struct reada_control *rc, u64 logical,
struct btrfs_key *top, u64 generation)
{
- struct btrfs_root *root = rc->root;
+ struct btrfs_fs_info *fs_info = rc->fs_info;
struct reada_extent *re;
struct reada_extctl *rec;
- re = reada_find_extent(root, logical, top); /* takes one ref */
+ /* takes one ref */
+ re = reada_find_extent(fs_info, logical, top);
if (!re)
return -1;
rec = kzalloc(sizeof(*rec), GFP_KERNEL);
if (!rec) {
- reada_extent_put(root->fs_info, re);
+ reada_extent_put(fs_info, re);
return -ENOMEM;
}
@@ -688,7 +678,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
spin_unlock(&fs_info->reada_lock);
return 0;
}
- dev->reada_next = re->logical + fs_info->tree_root->nodesize;
+ dev->reada_next = re->logical + fs_info->nodesize;
re->refcnt++;
spin_unlock(&fs_info->reada_lock);
@@ -714,12 +704,11 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
logical = re->logical;
atomic_inc(&dev->reada_in_flight);
- ret = reada_tree_block_flagged(fs_info->extent_root, logical,
- mirror_num, &eb);
+ ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
if (ret)
- __readahead_hook(fs_info, re, NULL, logical, ret);
+ __readahead_hook(fs_info, re, NULL, ret);
else if (eb)
- __readahead_hook(fs_info, re, eb, eb->start, ret);
+ __readahead_hook(fs_info, re, eb, ret);
if (eb)
free_extent_buffer(eb);
@@ -852,7 +841,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
if (ret == 0)
break;
pr_debug(" re: logical %llu size %u empty %d scheduled %d",
- re->logical, fs_info->tree_root->nodesize,
+ re->logical, fs_info->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
@@ -885,7 +874,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
continue;
}
pr_debug("re: logical %llu size %u list empty %d scheduled %d",
- re->logical, fs_info->tree_root->nodesize,
+ re->logical, fs_info->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
pr_cont(" zone %llu-%llu devs",
@@ -924,7 +913,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
if (!rc)
return ERR_PTR(-ENOMEM);
- rc->root = root;
+ rc->fs_info = root->fs_info;
rc->key_start = *key_start;
rc->key_end = *key_end;
atomic_set(&rc->elems, 0);
@@ -952,18 +941,17 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
int btrfs_reada_wait(void *handle)
{
struct reada_control *rc = handle;
- struct btrfs_fs_info *fs_info = rc->root->fs_info;
+ struct btrfs_fs_info *fs_info = rc->fs_info;
while (atomic_read(&rc->elems)) {
if (!atomic_read(&fs_info->reada_works_cnt))
reada_start_machine(fs_info);
wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
5 * HZ);
- dump_devs(rc->root->fs_info,
- atomic_read(&rc->elems) < 10 ? 1 : 0);
+ dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
}
- dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
+ dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
kref_put(&rc->refcnt, reada_control_release);
@@ -973,7 +961,7 @@ int btrfs_reada_wait(void *handle)
int btrfs_reada_wait(void *handle)
{
struct reada_control *rc = handle;
- struct btrfs_fs_info *fs_info = rc->root->fs_info;
+ struct btrfs_fs_info *fs_info = rc->fs_info;
while (atomic_read(&rc->elems)) {
if (!atomic_read(&fs_info->reada_works_cnt))
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c4af0cdb783d..379711048fb0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1288,9 +1288,10 @@ fail:
*/
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
struct mapping_node *node;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
node = kmalloc(sizeof(*node), GFP_NOFS);
if (!node)
@@ -1304,7 +1305,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
- btrfs_panic(root->fs_info, -EEXIST,
+ btrfs_panic(fs_info, -EEXIST,
"Duplicate root found for start=%llu while inserting into relocation tree",
node->bytenr);
kfree(node);
@@ -1321,9 +1322,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
*/
static void __del_reloc_root(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
struct mapping_node *node = NULL;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
@@ -1338,9 +1340,9 @@ static void __del_reloc_root(struct btrfs_root *root)
return;
BUG_ON((struct btrfs_root *)node->data != root);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
list_del_init(&root->root_list);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
kfree(node);
}
@@ -1350,9 +1352,10 @@ static void __del_reloc_root(struct btrfs_root *root)
*/
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
struct mapping_node *node = NULL;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
@@ -1380,11 +1383,11 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct extent_buffer *eb;
struct btrfs_root_item *root_item;
struct btrfs_key root_key;
- u64 last_snap = 0;
int ret;
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
@@ -1395,14 +1398,22 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
root_key.offset = objectid;
if (root->root_key.objectid == objectid) {
+ u64 commit_root_gen;
+
/* called by btrfs_init_reloc_root */
ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
BTRFS_TREE_RELOC_OBJECTID);
BUG_ON(ret);
-
- last_snap = btrfs_root_last_snapshot(&root->root_item);
- btrfs_set_root_last_snapshot(&root->root_item,
- trans->transid - 1);
+ /*
+ * Set the last_snapshot field to the generation of the commit
+ * root - this way ctree.c:btrfs_block_can_be_shared() behaves
+ * correctly (returns true) when the relocation root is created
+ * either inside the critical section of a transaction commit
+ * (through transaction.c:qgroup_account_snapshot()) or when
+ * it's created before the transaction commit is started.
+ */
+ commit_root_gen = btrfs_header_generation(root->commit_root);
+ btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
} else {
/*
* called by btrfs_reloc_post_snapshot_hook.
@@ -1426,23 +1437,17 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
memset(&root_item->drop_progress, 0,
sizeof(struct btrfs_disk_key));
root_item->drop_level = 0;
- /*
- * abuse rtransid, it is safe because it is impossible to
- * receive data into a relocation tree.
- */
- btrfs_set_root_rtransid(root_item, last_snap);
- btrfs_set_root_otransid(root_item, trans->transid);
}
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
- ret = btrfs_insert_root(trans, root->fs_info->tree_root,
+ ret = btrfs_insert_root(trans, fs_info->tree_root,
&root_key, root_item);
BUG_ON(ret);
kfree(root_item);
- reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
+ reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
BUG_ON(IS_ERR(reloc_root));
reloc_root->last_trans = trans->transid;
return reloc_root;
@@ -1455,8 +1460,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
struct btrfs_block_rsv *rsv;
int clear_rsv = 0;
int ret;
@@ -1492,6 +1498,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct btrfs_root_item *root_item;
int ret;
@@ -1502,7 +1509,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
reloc_root = root->reloc_root;
root_item = &reloc_root->root_item;
- if (root->fs_info->reloc_ctl->merge_reloc_tree &&
+ if (fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
root->reloc_root = NULL;
__del_reloc_root(reloc_root);
@@ -1514,7 +1521,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
reloc_root->commit_root = btrfs_root_node(reloc_root);
}
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ ret = btrfs_update_root(trans, fs_info->tree_root,
&reloc_root->root_key, root_item);
BUG_ON(ret);
@@ -1642,6 +1649,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *leaf)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
struct inode *inode = NULL;
@@ -1698,8 +1706,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
end = key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
WARN_ON(!IS_ALIGNED(key.offset,
- root->sectorsize));
- WARN_ON(!IS_ALIGNED(end, root->sectorsize));
+ fs_info->sectorsize));
+ WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--;
ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
key.offset, end);
@@ -1727,7 +1735,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
- ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
+ ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
num_bytes, parent,
btrfs_header_owner(leaf),
key.objectid, key.offset);
@@ -1736,7 +1744,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
break;
}
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
@@ -1777,6 +1785,7 @@ int replace_path(struct btrfs_trans_handle *trans,
struct btrfs_path *path, struct btrfs_key *next_key,
int lowest_level, int max_level)
{
+ struct btrfs_fs_info *fs_info = dest->fs_info;
struct extent_buffer *eb;
struct extent_buffer *parent;
struct btrfs_key key;
@@ -1834,7 +1843,7 @@ again:
btrfs_node_key_to_cpu(parent, next_key, slot + 1);
old_bytenr = btrfs_node_blockptr(parent, slot);
- blocksize = dest->nodesize;
+ blocksize = fs_info->nodesize;
old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
if (level <= max_level) {
@@ -1860,7 +1869,7 @@ again:
break;
}
- eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
+ eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
@@ -1901,6 +1910,29 @@ again:
BUG_ON(ret);
/*
+ * Inform qgroup to trace both subtrees.
+ *
+ * We must trace both trees:
+ * 1) Tree reloc subtree
+ * If not traced, we will leak data numbers
+ * 2) Fs subtree
+ * If not traced, we will double count old data and
+ * tree block numbers, if the current trans doesn't free
+ * the data reloc tree inode.
+ */
+ ret = btrfs_qgroup_trace_subtree(trans, src, parent,
+ btrfs_header_generation(parent),
+ btrfs_header_level(parent));
+ if (ret < 0)
+ break;
+ ret = btrfs_qgroup_trace_subtree(trans, dest,
+ path->nodes[level],
+ btrfs_header_generation(path->nodes[level]),
+ btrfs_header_level(path->nodes[level]));
+ if (ret < 0)
+ break;
+
+ /*
* swap blocks in fs tree and reloc tree.
*/
btrfs_set_node_blockptr(parent, slot, new_bytenr);
@@ -1913,21 +1945,21 @@ again:
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(path->nodes[level]);
- ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
- path->nodes[level]->start,
+ ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
+ blocksize, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
- 0, dest->root_key.objectid, level - 1,
- 0);
+ ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+ blocksize, 0, dest->root_key.objectid,
+ level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
0, dest->root_key.objectid, level - 1,
0);
BUG_ON(ret);
@@ -1986,6 +2018,7 @@ static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
int *level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *eb = NULL;
int i;
u64 bytenr;
@@ -2016,7 +2049,7 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
}
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
- eb = read_tree_block(root, bytenr, ptr_gen);
+ eb = read_tree_block(fs_info, bytenr, ptr_gen);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@@ -2038,6 +2071,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
struct btrfs_key *min_key,
struct btrfs_key *max_key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = NULL;
u64 objectid;
u64 start, end;
@@ -2072,7 +2106,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
start = 0;
else {
start = min_key->offset;
- WARN_ON(!IS_ALIGNED(start, root->sectorsize));
+ WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
}
} else {
start = 0;
@@ -2087,7 +2121,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
if (max_key->offset == 0)
continue;
end = max_key->offset;
- WARN_ON(!IS_ALIGNED(end, root->sectorsize));
+ WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--;
}
} else {
@@ -2127,6 +2161,7 @@ static int find_next_key(struct btrfs_path *path, int level,
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
LIST_HEAD(inode_list);
struct btrfs_key key;
struct btrfs_key next_key;
@@ -2175,7 +2210,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
btrfs_unlock_up_safe(path, 0);
}
- min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
+ min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
memset(&next_key, 0, sizeof(next_key));
while (1) {
@@ -2236,10 +2271,10 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
path->slots[level]);
root_item->drop_level = level;
- btrfs_end_transaction_throttle(trans, root);
+ btrfs_end_transaction_throttle(trans);
trans = NULL;
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@@ -2267,9 +2302,9 @@ out:
}
if (trans)
- btrfs_end_transaction_throttle(trans, root);
+ btrfs_end_transaction_throttle(trans);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@@ -2281,16 +2316,17 @@ static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
struct btrfs_root *root = rc->extent_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct btrfs_trans_handle *trans;
LIST_HEAD(reloc_roots);
u64 num_bytes = 0;
int ret;
- mutex_lock(&root->fs_info->reloc_mutex);
- rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
+ mutex_lock(&fs_info->reloc_mutex);
+ rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
rc->merging_rsv_size += rc->nodes_relocated * 2;
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
again:
if (!err) {
@@ -2304,16 +2340,16 @@ again:
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
if (!err)
- btrfs_block_rsv_release(rc->extent_root,
- rc->block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv,
+ num_bytes);
return PTR_ERR(trans);
}
if (!err) {
if (num_bytes != rc->merging_rsv_size) {
- btrfs_end_transaction(trans, rc->extent_root);
- btrfs_block_rsv_release(rc->extent_root,
- rc->block_rsv, num_bytes);
+ btrfs_end_transaction(trans);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv,
+ num_bytes);
goto again;
}
}
@@ -2325,8 +2361,7 @@ again:
struct btrfs_root, root_list);
list_del_init(&reloc_root->root_list);
- root = read_fs_root(reloc_root->fs_info,
- reloc_root->root_key.offset);
+ root = read_fs_root(fs_info, reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
@@ -2344,9 +2379,9 @@ again:
list_splice(&reloc_roots, &rc->reloc_roots);
if (!err)
- btrfs_commit_transaction(trans, rc->extent_root);
+ btrfs_commit_transaction(trans);
else
- btrfs_end_transaction(trans, rc->extent_root);
+ btrfs_end_transaction(trans);
return err;
}
@@ -2369,11 +2404,9 @@ void free_reloc_roots(struct list_head *list)
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_root *root;
struct btrfs_root *reloc_root;
- u64 last_snap;
- u64 otransid;
- u64 objectid;
LIST_HEAD(reloc_roots);
int found = 0;
int ret = 0;
@@ -2386,9 +2419,9 @@ again:
* adding their roots to the list while we are
* doing this splice
*/
- mutex_lock(&root->fs_info->reloc_mutex);
+ mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
while (!list_empty(&reloc_roots)) {
found = 1;
@@ -2396,7 +2429,7 @@ again:
struct btrfs_root, root_list);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- root = read_fs_root(reloc_root->fs_info,
+ root = read_fs_root(fs_info,
reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
@@ -2412,14 +2445,6 @@ again:
list_del_init(&reloc_root->root_list);
}
- /*
- * we keep the old last snapshot transid in rtranid when we
- * created the relocation tree.
- */
- last_snap = btrfs_root_rtransid(&reloc_root->root_item);
- otransid = btrfs_root_otransid(&reloc_root->root_item);
- objectid = reloc_root->root_key.offset;
-
ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
if (ret < 0) {
if (list_empty(&reloc_root->root_list))
@@ -2435,14 +2460,14 @@ again:
}
out:
if (ret) {
- btrfs_handle_fs_error(root->fs_info, ret, NULL);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
/* new reloc root may be added */
- mutex_lock(&root->fs_info->reloc_mutex);
+ mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
}
@@ -2464,12 +2489,13 @@ static void free_block_list(struct rb_root *blocks)
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *reloc_root)
{
+ struct btrfs_fs_info *fs_info = reloc_root->fs_info;
struct btrfs_root *root;
if (reloc_root->last_trans == trans->transid)
return 0;
- root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
+ root = read_fs_root(fs_info, reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
@@ -2579,6 +2605,7 @@ static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
struct backref_node *node, int reserve)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *next = node;
struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@@ -2593,7 +2620,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
if (next->processed && (reserve || next != node))
break;
- num_bytes += rc->extent_root->nodesize;
+ num_bytes += fs_info->nodesize;
if (list_empty(&next->upper))
break;
@@ -2613,6 +2640,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
struct backref_node *node)
{
struct btrfs_root *root = rc->extent_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_bytes;
int ret;
u64 tmp;
@@ -2630,7 +2658,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- tmp = rc->extent_root->nodesize * RELOCATION_RESERVED_NODES;
+ tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
while (tmp <= rc->reserved_bytes)
tmp <<= 1;
/*
@@ -2640,8 +2668,8 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
* space for relocation and we will return eailer in
* enospc case.
*/
- rc->block_rsv->size = tmp + rc->extent_root->nodesize *
- RELOCATION_RESERVED_NODES;
+ rc->block_rsv->size = tmp + fs_info->nodesize *
+ RELOCATION_RESERVED_NODES;
return -EAGAIN;
}
@@ -2661,6 +2689,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
struct btrfs_key *key,
struct btrfs_path *path, int lowest)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *upper;
struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@@ -2741,9 +2770,9 @@ static int do_relocation(struct btrfs_trans_handle *trans,
goto next;
}
- blocksize = root->nodesize;
+ blocksize = root->fs_info->nodesize;
generation = btrfs_node_ptr_generation(upper->eb, slot);
- eb = read_tree_block(root, bytenr, generation);
+ eb = read_tree_block(fs_info, bytenr, generation);
if (IS_ERR(eb)) {
err = PTR_ERR(eb);
goto next;
@@ -2772,7 +2801,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
trans->transid);
btrfs_mark_buffer_dirty(upper->eb);
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, root->fs_info,
node->eb->start, blocksize,
upper->eb->start,
btrfs_header_owner(upper->eb),
@@ -2854,7 +2883,7 @@ static void __mark_block_processed(struct reloc_control *rc,
u32 blocksize;
if (node->level == 0 ||
in_block_group(node->bytenr, rc->block_group)) {
- blocksize = rc->extent_root->nodesize;
+ blocksize = rc->extent_root->fs_info->nodesize;
mark_block_processed(rc, node->bytenr, blocksize);
}
node->processed = 1;
@@ -2894,7 +2923,7 @@ static void update_processed_blocks(struct reloc_control *rc,
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
- u32 blocksize = rc->extent_root->nodesize;
+ u32 blocksize = rc->extent_root->fs_info->nodesize;
if (test_range_bit(&rc->processed_blocks, bytenr,
bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
@@ -2902,14 +2931,13 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
return 0;
}
-static int get_tree_block_key(struct reloc_control *rc,
+static int get_tree_block_key(struct btrfs_fs_info *fs_info,
struct tree_block *block)
{
struct extent_buffer *eb;
BUG_ON(block->key_ready);
- eb = read_tree_block(rc->extent_root, block->bytenr,
- block->key.offset);
+ eb = read_tree_block(fs_info, block->bytenr, block->key.offset);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@@ -2988,6 +3016,7 @@ static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
@@ -3005,7 +3034,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready)
- readahead_tree_block(rc->extent_root, block->bytenr);
+ readahead_tree_block(fs_info, block->bytenr);
rb_node = rb_next(rb_node);
}
@@ -3013,7 +3042,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready) {
- err = get_tree_block_key(rc, block);
+ err = get_tree_block_key(fs_info, block);
if (err)
goto out_free_path;
}
@@ -3107,7 +3136,7 @@ static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
u64 block_start)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
int ret = 0;
@@ -3120,7 +3149,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
em->len = end + 1 - start;
em->block_len = em->len;
em->block_start = block_start;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -3141,6 +3170,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
static int relocate_file_extent_cluster(struct inode *inode,
struct file_extent_cluster *cluster)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 page_start;
u64 page_end;
u64 offset = BTRFS_I(inode)->index_cnt;
@@ -3236,7 +3266,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
index++;
balance_dirty_pages_ratelimited(inode->i_mapping);
- btrfs_throttle(BTRFS_I(inode)->root);
+ btrfs_throttle(fs_info);
}
WARN_ON(nr != cluster->nr);
out:
@@ -3376,7 +3406,7 @@ static int add_tree_block(struct reloc_control *rc,
return -ENOMEM;
block->bytenr = extent_key->objectid;
- block->key.objectid = rc->extent_root->nodesize;
+ block->key.objectid = rc->extent_root->fs_info->nodesize;
block->key.offset = generation;
block->level = level;
block->key_ready = 0;
@@ -3395,11 +3425,11 @@ static int __add_tree_block(struct reloc_control *rc,
u64 bytenr, u32 blocksize,
struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
int ret;
- bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
- SKINNY_METADATA);
+ bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
if (tree_block_processed(bytenr, rc))
return 0;
@@ -3465,7 +3495,7 @@ static int block_use_full_backref(struct reloc_control *rc,
btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
return 1;
- ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
+ ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
eb->start, btrfs_header_level(eb), 1,
NULL, &flags);
BUG_ON(ret);
@@ -3502,7 +3532,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
}
truncate:
- ret = btrfs_check_trunc_cache_free_space(root,
+ ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
if (ret)
goto out;
@@ -3515,8 +3545,8 @@ truncate:
ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
out:
iput(inode);
return ret;
@@ -3532,6 +3562,7 @@ static int find_data_references(struct reloc_control *rc,
struct btrfs_extent_data_ref *ref,
struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_path *path;
struct tree_block *block;
struct btrfs_root *root;
@@ -3558,8 +3589,7 @@ static int find_data_references(struct reloc_control *rc,
* it and redo the search.
*/
if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
- ret = delete_block_group_cache(rc->extent_root->fs_info,
- rc->block_group,
+ ret = delete_block_group_cache(fs_info, rc->block_group,
NULL, ref_objectid);
if (ret != -ENOENT)
return ret;
@@ -3571,7 +3601,7 @@ static int find_data_references(struct reloc_control *rc,
return -ENOMEM;
path->reada = READA_FORWARD;
- root = read_fs_root(rc->extent_root->fs_info, ref_root);
+ root = read_fs_root(fs_info, ref_root);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto out;
@@ -3706,7 +3736,7 @@ int add_data_references(struct reloc_control *rc,
struct btrfs_extent_inline_ref *iref;
unsigned long ptr;
unsigned long end;
- u32 blocksize = rc->extent_root->nodesize;
+ u32 blocksize = rc->extent_root->fs_info->nodesize;
int ret = 0;
int err = 0;
@@ -3797,6 +3827,7 @@ static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
struct btrfs_key *extent_key)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_key key;
struct extent_buffer *leaf;
u64 start, end, last;
@@ -3848,7 +3879,7 @@ next:
}
if (key.type == BTRFS_METADATA_ITEM_KEY &&
- key.objectid + rc->extent_root->nodesize <=
+ key.objectid + fs_info->nodesize <=
rc->search_start) {
path->slots[0]++;
goto next;
@@ -3866,7 +3897,7 @@ next:
rc->search_start = key.objectid + key.offset;
else
rc->search_start = key.objectid +
- rc->extent_root->nodesize;
+ fs_info->nodesize;
memcpy(extent_key, &key, sizeof(key));
return 0;
}
@@ -3913,7 +3944,7 @@ int prepare_to_relocate(struct reloc_control *rc)
struct btrfs_trans_handle *trans;
int ret;
- rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
+ rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
BTRFS_BLOCK_RSV_TEMP);
if (!rc->block_rsv)
return -ENOMEM;
@@ -3924,7 +3955,7 @@ int prepare_to_relocate(struct reloc_control *rc)
rc->nodes_relocated = 0;
rc->merging_rsv_size = 0;
rc->reserved_bytes = 0;
- rc->block_rsv->size = rc->extent_root->nodesize *
+ rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
RELOCATION_RESERVED_NODES;
ret = btrfs_block_rsv_refill(rc->extent_root,
rc->block_rsv, rc->block_rsv->size,
@@ -3945,96 +3976,13 @@ int prepare_to_relocate(struct reloc_control *rc)
*/
return PTR_ERR(trans);
}
- btrfs_commit_transaction(trans, rc->extent_root);
+ btrfs_commit_transaction(trans);
return 0;
}
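prepare_to_relocate() now ends with btrfs_commit_transaction(trans): the transaction commit/end entry points lose their root argument because the handle already knows which filesystem it belongs to, and the root callers passed was redundant. A hedged sketch of the new shape (stub types; the real handle has many more fields):

	struct btrfs_fs_info;

	/* The handle records its filesystem context. */
	struct btrfs_trans_handle {
		struct btrfs_fs_info *fs_info;
		/* ... */
	};

	/* Was: btrfs_commit_transaction(trans, root). The root argument is
	 * gone; the context now comes from the handle itself. */
	int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
	{
		struct btrfs_fs_info *fs_info = trans->fs_info;

		/* ... commit work against fs_info ... */
		(void)fs_info;
		return 0;
	}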
-/*
- * Qgroup fixer for data chunk relocation.
- * The data relocation is done in the following steps
- * 1) Copy data extents into data reloc tree
- * 2) Create tree reloc tree(special snapshot) for related subvolumes
- * 3) Modify file extents in tree reloc tree
- * 4) Merge tree reloc tree with original fs tree, by swapping tree blocks
- *
- * The problem is, data and tree reloc tree are not accounted to qgroup,
- * and 4) will only info qgroup to track tree blocks change, not file extents
- * in the tree blocks.
- *
- * The good news is, related data extents are all in data reloc tree, so we
- * only need to info qgroup to track all file extents in data reloc tree
- * before commit trans.
- */
-static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
- struct reloc_control *rc)
-{
- struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct inode *inode = rc->data_inode;
- struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
- struct btrfs_key key;
- int ret = 0;
-
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
- return 0;
-
- /*
- * Only for stage where we update data pointers the qgroup fix is
- * valid.
- * For MOVING_DATA stage, we will miss the timing of swapping tree
- * blocks, and won't fix it.
- */
- if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- key.objectid = btrfs_ino(inode);
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = 0;
-
- ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
- while (1) {
- struct btrfs_file_extent_item *fi;
-
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid > btrfs_ino(inode))
- break;
- if (key.type != BTRFS_EXTENT_DATA_KEY)
- goto next;
- fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(path->nodes[0], fi) !=
- BTRFS_FILE_EXTENT_REG)
- goto next;
- ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
- btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
- btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
- GFP_NOFS);
- if (ret < 0)
- break;
-next:
- ret = btrfs_next_item(data_reloc_root, path);
- if (ret < 0)
- break;
- if (ret > 0) {
- ret = 0;
- break;
- }
- }
- unlock_extent(&BTRFS_I(inode)->io_tree, 0 , (u64)-1);
-out:
- btrfs_free_path(path);
- return ret;
-}
-
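Aside from its qgroup purpose, the deleted fixer is a compact instance of the standard btrfs item walk: position once with btrfs_search_slot(), decode the key for the current slot, and advance with btrfs_next_item() until the objectid moves past the inode. Skeleton of the idiom, using only calls that appear in the removed code:

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);	/* position */
	if (ret < 0)
		goto out;
	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid > btrfs_ino(inode))
			break;			/* walked past this inode */
		if (key.type == BTRFS_EXTENT_DATA_KEY) {
			/* ... handle one file extent item ... */
		}
		ret = btrfs_next_item(root, path);	/* next slot/leaf */
		if (ret)		/* <0: error, >0: end of tree */
			break;
	}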
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
struct btrfs_trans_handle *trans = NULL;
@@ -4075,7 +4023,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
restart:
if (update_backref_cache(trans, &rc->backref_cache)) {
- btrfs_end_transaction(trans, rc->extent_root);
+ btrfs_end_transaction(trans);
continue;
}
@@ -4163,8 +4111,8 @@ restart:
}
}
- btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root);
+ btrfs_end_transaction_throttle(trans);
+ btrfs_btree_balance_dirty(fs_info);
trans = NULL;
if (rc->stage == MOVE_DATA_EXTENTS &&
@@ -4179,7 +4127,7 @@ restart:
}
}
if (trans && progress && err == -ENOSPC) {
- ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
rc->block_group->flags);
if (ret == 1) {
err = 0;
@@ -4192,8 +4140,8 @@ restart:
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
if (trans) {
- btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root);
+ btrfs_end_transaction_throttle(trans);
+ btrfs_btree_balance_dirty(fs_info);
}
if (!err) {
@@ -4207,7 +4155,7 @@ restart:
set_reloc_control(rc);
backref_cache_cleanup(&rc->backref_cache);
- btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
err = prepare_to_merge(rc, err);
@@ -4215,7 +4163,7 @@ restart:
rc->merge_reloc_tree = 0;
unset_reloc_control(rc);
- btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
/* get rid of pinned extents */
trans = btrfs_join_transaction(rc->extent_root);
@@ -4223,16 +4171,9 @@ restart:
err = PTR_ERR(trans);
goto out_free;
}
- ret = qgroup_fix_relocated_data_extents(trans, rc);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- if (!err)
- err = ret;
- goto out_free;
- }
- btrfs_commit_transaction(trans, rc->extent_root);
+ btrfs_commit_transaction(trans);
out_free:
- btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
+ btrfs_free_block_rsv(fs_info, rc->block_rsv);
btrfs_free_path(path);
return err;
}
@@ -4255,7 +4196,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
- memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+ memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
btrfs_set_inode_generation(leaf, item, 1);
btrfs_set_inode_size(leaf, item, 0);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
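memset_extent_buffer() was only ever called with 0 here, so this series renames the zeroing case to memzero_extent_buffer() and drops the constant. In terms of the old API the new call is equivalent to a shim like the following (a sketch only — the real extent_io.c implementation walks the buffer's backing pages directly):

	static inline void memzero_extent_buffer(struct extent_buffer *eb,
						 unsigned long start,
						 unsigned long len)
	{
		memset_extent_buffer(eb, 0, start, len);	/* old spelling */
	}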
@@ -4300,14 +4241,14 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root, NULL);
BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
BTRFS_I(inode)->index_cnt = group->key.objectid;
err = btrfs_orphan_add(trans, inode);
out:
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
if (err) {
if (inode)
iput(inode);
@@ -4333,11 +4274,50 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
}
/*
+ * Print the block group being relocated
+ */
+static void describe_relocation(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group_cache *block_group)
+{
+ char buf[128]; /* prefixed by a '|' that'll be dropped */
+ u64 flags = block_group->flags;
+
+ /* Shouldn't happen */
+ if (!flags) {
+ strcpy(buf, "|NONE");
+ } else {
+ char *bp = buf;
+
+#define DESCRIBE_FLAG(f, d) \
+ if (flags & BTRFS_BLOCK_GROUP_##f) { \
+ bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \
+ flags &= ~BTRFS_BLOCK_GROUP_##f; \
+ }
+ DESCRIBE_FLAG(DATA, "data");
+ DESCRIBE_FLAG(SYSTEM, "system");
+ DESCRIBE_FLAG(METADATA, "metadata");
+ DESCRIBE_FLAG(RAID0, "raid0");
+ DESCRIBE_FLAG(RAID1, "raid1");
+ DESCRIBE_FLAG(DUP, "dup");
+ DESCRIBE_FLAG(RAID10, "raid10");
+ DESCRIBE_FLAG(RAID5, "raid5");
+ DESCRIBE_FLAG(RAID6, "raid6");
+ if (flags)
+ snprintf(buf, buf - bp + sizeof(buf), "|0x%llx", flags);
+#undef DESCRIBE_FLAG
+ }
+
+ btrfs_info(fs_info,
+ "relocating block group %llu flags %s",
+ block_group->key.objectid, buf + 1);
+}
+
+/*
* function to relocate all extents in a block group.
*/
-int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
- struct btrfs_fs_info *fs_info = extent_root->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
struct reloc_control *rc;
struct inode *inode;
struct btrfs_path *path;
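describe_relocation(), added above, builds the flag string by appending "|name" fragments and printing from buf + 1 to drop the leading pipe; the odd-looking size argument buf - bp + sizeof(buf) is simply the space remaining, sizeof(buf) - (bp - buf). Note that the kernel version's residual-flags snprintf writes at buf rather than bp; the standalone rendition below appends at bp so any unmatched bits land at the end of the string. Flag values here are placeholders, not the real BTRFS_BLOCK_GROUP_* bits:

	#include <stdio.h>

	/* Demo flag bits -- placeholders for BTRFS_BLOCK_GROUP_*. */
	#define BG_DATA  (1ULL << 0)
	#define BG_RAID1 (1ULL << 4)

	int main(void)
	{
		char buf[128];		/* prefixed by a '|' that'll be dropped */
		unsigned long long flags = BG_DATA | BG_RAID1 | (1ULL << 40);
		char *bp = buf;

	#define DESCRIBE_FLAG(f, d)						\
		if (flags & (f)) {						\
			bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d);	\
			flags &= ~(unsigned long long)(f);			\
		}
		DESCRIBE_FLAG(BG_DATA,  "data");
		DESCRIBE_FLAG(BG_RAID1, "raid1");
	#undef DESCRIBE_FLAG
		if (flags)	/* anything not matched above, printed raw */
			bp += snprintf(bp, buf - bp + sizeof(buf),
				       "|0x%llx", flags);

		printf("flags %s\n", buf + 1);	/* drop the leading '|' */
		return 0;
	}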
@@ -4388,9 +4368,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
goto out;
}
- btrfs_info(extent_root->fs_info,
- "relocating block group %llu flags %llu",
- rc->block_group->key.objectid, rc->block_group->flags);
+ describe_relocation(fs_info, rc->block_group);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
@@ -4410,8 +4388,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
if (rc->extents_found == 0)
break;
- btrfs_info(extent_root->fs_info, "found %llu extents",
- rc->extents_found);
+ btrfs_info(fs_info, "found %llu extents", rc->extents_found);
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
ret = btrfs_wait_ordered_range(rc->data_inode, 0,
@@ -4431,7 +4408,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
if (err && rw)
- btrfs_dec_block_group_ro(extent_root, rc->block_group);
+ btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
btrfs_put_block_group(rc->block_group);
kfree(rc);
@@ -4440,10 +4417,11 @@ out:
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret, err;
- trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+ trans = btrfs_start_transaction(fs_info->tree_root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
@@ -4451,10 +4429,10 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
sizeof(root->root_item.drop_progress));
root->root_item.drop_level = 0;
btrfs_set_root_refs(&root->root_item, 0);
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
- err = btrfs_end_transaction(trans, root->fs_info->tree_root);
+ err = btrfs_end_transaction(trans);
if (err)
return err;
return ret;
@@ -4468,6 +4446,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
*/
int btrfs_recover_relocation(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(reloc_roots);
struct btrfs_key key;
struct btrfs_root *fs_root;
@@ -4489,7 +4468,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
key.offset = (u64)-1;
while (1) {
- ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
+ ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
path, 0, 0);
if (ret < 0) {
err = ret;
@@ -4517,7 +4496,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
list_add(&reloc_root->root_list, &reloc_roots);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- fs_root = read_fs_root(root->fs_info,
+ fs_root = read_fs_root(fs_info,
reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
@@ -4543,13 +4522,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
if (list_empty(&reloc_roots))
goto out;
- rc = alloc_reloc_control(root->fs_info);
+ rc = alloc_reloc_control(fs_info);
if (!rc) {
err = -ENOMEM;
goto out;
}
- rc->extent_root = root->fs_info->extent_root;
+ rc->extent_root = fs_info->extent_root;
set_reloc_control(rc);
@@ -4573,8 +4552,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
continue;
}
- fs_root = read_fs_root(root->fs_info,
- reloc_root->root_key.offset);
+ fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root);
goto out_free;
@@ -4585,7 +4563,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
fs_root->reloc_root = reloc_root;
}
- err = btrfs_commit_transaction(trans, rc->extent_root);
+ err = btrfs_commit_transaction(trans);
if (err)
goto out_free;
@@ -4598,12 +4576,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
err = PTR_ERR(trans);
goto out_free;
}
- err = qgroup_fix_relocated_data_extents(trans, rc);
- if (err < 0) {
- btrfs_abort_transaction(trans, err);
- goto out_free;
- }
- err = btrfs_commit_transaction(trans, rc->extent_root);
+ err = btrfs_commit_transaction(trans);
out_free:
kfree(rc);
out:
@@ -4614,8 +4587,7 @@ out:
if (err == 0) {
/* cleanup orphan inode in data relocation tree */
- fs_root = read_fs_root(root->fs_info,
- BTRFS_DATA_RELOC_TREE_OBJECTID);
+ fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
else
@@ -4632,9 +4604,9 @@ out:
*/
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums;
struct btrfs_ordered_extent *ordered;
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
u64 disk_bytenr;
u64 new_bytenr;
@@ -4644,7 +4616,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
- ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
+ ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
disk_bytenr + len - 1, &list, 0);
if (ret)
goto out;
@@ -4679,13 +4651,14 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *cow)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct reloc_control *rc;
struct backref_node *node;
int first_cow = 0;
int level;
int ret = 0;
- rc = root->fs_info->reloc_ctl;
+ rc = fs_info->reloc_ctl;
if (!rc)
return 0;
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index edae751e870c..4c6735491ee0 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -132,6 +132,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_root_item
*item)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *l;
int ret;
@@ -150,9 +151,8 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
}
if (ret != 0) {
- btrfs_print_leaf(root, path->nodes[0]);
- btrfs_crit(root->fs_info,
- "unable to update root key %llu %u %llu",
+ btrfs_print_leaf(fs_info, path->nodes[0]);
+ btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset);
BUG_ON(1);
}
@@ -216,8 +216,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}
-int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
+int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_key key;
@@ -227,7 +228,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
int ret;
bool can_recover = true;
- if (tree_root->fs_info->sb->s_flags & MS_RDONLY)
+ if (fs_info->sb->s_flags & MS_RDONLY)
can_recover = false;
path = btrfs_alloc_path();
@@ -275,8 +276,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
* in turn reads and inserts fs roots while doing backref
* walking.
*/
- root = btrfs_lookup_fs_root(tree_root->fs_info,
- root_key.objectid);
+ root = btrfs_lookup_fs_root(fs_info, root_key.objectid);
if (root) {
WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state));
@@ -297,15 +297,15 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
trans = btrfs_join_transaction(tree_root);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- btrfs_handle_fs_error(tree_root->fs_info, err,
+ btrfs_handle_fs_error(fs_info, err,
"Failed to start trans to delete orphan item");
break;
}
err = btrfs_del_orphan_item(trans, tree_root,
root_key.objectid);
- btrfs_end_transaction(trans, tree_root);
+ btrfs_end_transaction(trans);
if (err) {
- btrfs_handle_fs_error(tree_root->fs_info, err,
+ btrfs_handle_fs_error(fs_info, err,
"Failed to delete root orphan item");
break;
}
@@ -320,7 +320,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
- err = btrfs_insert_fs_root(root->fs_info, root);
+ err = btrfs_insert_fs_root(fs_info, root);
if (err) {
BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
@@ -358,11 +358,12 @@ out:
}
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *tree_root,
+ struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
const char *name, int name_len)
{
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_path *path;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
@@ -429,10 +430,11 @@ out:
* Will return 0, -ENOMEM, or anything from the CoW path
*/
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *tree_root,
+ struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
const char *name, int name_len)
{
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_key key;
int ret;
struct btrfs_path *path;
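btrfs_del_root_ref() and btrfs_add_root_ref() now take fs_info and fetch fs_info->tree_root themselves — the recurring shape of this series: hand every helper the one filesystem context and let it pick the root it needs (tree_root, extent_root, csum_root, dev_root). Call sites shrink accordingly; a sketch (fragment, surrounding variables assumed, old call-site spelling varies):

	/* Before: the caller had to know the helper wanted the tree root. */
	ret = btrfs_del_root_ref(trans, fs_info->tree_root, root_id, ref_id,
				 dirid, &sequence, name, name_len);

	/* After: the context suffices; the helper derives tree_root itself. */
	ret = btrfs_del_root_ref(trans, fs_info, root_id, ref_id,
				 dirid, &sequence, name, name_len);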
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ff3078234d94..9a94670536a6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -171,7 +171,7 @@ struct scrub_wr_ctx {
struct scrub_ctx {
struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
- struct btrfs_root *dev_root;
+ struct btrfs_fs_info *fs_info;
int first_free;
int curr;
atomic_t bios_in_flight;
@@ -356,7 +356,7 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
*/
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
atomic_inc(&sctx->refs);
/*
@@ -388,7 +388,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
/*
* see scrub_pending_trans_workers_inc() why we're pretending
@@ -458,7 +458,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
struct scrub_ctx *sctx;
int i;
- struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = dev->fs_info;
int ret;
sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
@@ -468,7 +468,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx->is_dev_replace = is_dev_replace;
sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
sctx->curr = -1;
- sctx->dev_root = dev->dev_root;
+ sctx->fs_info = dev->fs_info;
for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
struct scrub_bio *sbio;
@@ -489,8 +489,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx->bios[i]->next_free = -1;
}
sctx->first_free = 0;
- sctx->nodesize = dev->dev_root->nodesize;
- sctx->sectorsize = dev->dev_root->sectorsize;
+ sctx->nodesize = fs_info->nodesize;
+ sctx->sectorsize = fs_info->sectorsize;
atomic_set(&sctx->bios_in_flight, 0);
atomic_set(&sctx->workers_pending, 0);
atomic_set(&sctx->cancel_req, 0);
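scrub_setup_ctx() now reads nodesize and sectorsize from fs_info: the per-root copies of the filesystem geometry are gone, leaving a single authoritative copy in the fs context. Sketch of the relevant fields (u32 stubbed for userspace; the real struct is far larger):

	typedef unsigned int u32;	/* kernel type, stubbed */

	struct btrfs_fs_info {
		u32 sectorsize;		/* data block size */
		u32 nodesize;		/* metadata node/leaf size */
		/* ... */
	};

	/* Formerly duplicated as root->sectorsize / root->nodesize on every
	 * root (and reachable via dev->dev_root); those copies are removed. */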
@@ -524,7 +524,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
struct extent_buffer *eb;
struct btrfs_inode_item *inode_item;
struct scrub_warning *swarn = warn_ctx;
- struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
struct inode_fs_paths *ipath = NULL;
struct btrfs_root *local_root;
struct btrfs_key root_key;
@@ -618,7 +618,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
WARN_ON(sblock->page_count < 1);
dev = sblock->pagev[0]->dev;
- fs_info = sblock->sctx->dev_root->fs_info;
+ fs_info = sblock->sctx->fs_info;
path = btrfs_alloc_path();
if (!path)
@@ -789,6 +789,7 @@ out:
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
+ struct btrfs_fs_info *fs_info;
int ret;
struct scrub_fixup_nodatasum *fixup;
struct scrub_ctx *sctx;
@@ -798,6 +799,7 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
fixup = container_of(work, struct scrub_fixup_nodatasum, work);
sctx = fixup->sctx;
+ fs_info = fixup->root->fs_info;
path = btrfs_alloc_path();
if (!path) {
@@ -823,9 +825,8 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
* (once it's finished) and rewrite the failed sector if a good copy
* can be found.
*/
- ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
- path, scrub_fixup_readpage,
- fixup);
+ ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
+ scrub_fixup_readpage, fixup);
if (ret < 0) {
uncorrectable = 1;
goto out;
@@ -838,15 +839,14 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
out:
if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans, fixup->root);
+ btrfs_end_transaction(trans);
if (uncorrectable) {
spin_lock(&sctx->stat_lock);
++sctx->stat.uncorrectable_errors;
spin_unlock(&sctx->stat_lock);
btrfs_dev_replace_stats_inc(
- &sctx->dev_root->fs_info->dev_replace.
- num_uncorrectable_read_errors);
- btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
+ &fs_info->dev_replace.num_uncorrectable_read_errors);
+ btrfs_err_rl_in_rcu(fs_info,
"unable to fixup (nodatasum) error at logical %llu on dev %s",
fixup->logical, rcu_str_deref(fixup->dev->name));
}
@@ -898,7 +898,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
DEFAULT_RATELIMIT_BURST);
BUG_ON(sblock_to_check->page_count < 1);
- fs_info = sctx->dev_root->fs_info;
+ fs_info = sctx->fs_info;
if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
/*
* if we find an error in a super block, we just report it.
@@ -1177,9 +1177,7 @@ nodatasum_case:
if (scrub_write_page_to_dev_replace(sblock_other,
page_num) != 0) {
btrfs_dev_replace_stats_inc(
- &sctx->dev_root->
- fs_info->dev_replace.
- num_write_errors);
+ &fs_info->dev_replace.num_write_errors);
success = 0;
}
} else if (sblock_other) {
@@ -1302,7 +1300,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck)
{
struct scrub_ctx *sctx = original_sblock->sctx;
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 length = original_sblock->page_count * PAGE_SIZE;
u64 logical = original_sblock->pagev[0]->logical;
u64 generation = original_sblock->pagev[0]->generation;
@@ -1334,8 +1332,8 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
* with a length of PAGE_SIZE, each returned stripe
* represents one mirror
*/
- ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
- &mapped_length, &bbio, 0, 1);
+ ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
+ logical, &mapped_length, &bbio, 0, 1);
if (ret || !bbio || mapped_length < sublen) {
btrfs_put_bbio(bbio);
return -EIO;
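btrfs_map_block()/btrfs_map_sblock() stop taking raw request flags (READ, WRITE, REQ_GET_READ_MIRRORS) and take a dedicated mapping operation instead, so the chunk-mapping layer no longer piggybacks on bio op bits. Judging from the constants used in these hunks, the enum has roughly this shape (BTRFS_MAP_DISCARD is an assumption, not visible in this diff):

	enum btrfs_map_op {
		BTRFS_MAP_READ,
		BTRFS_MAP_WRITE,
		BTRFS_MAP_DISCARD,	/* assumed member, not shown above */
		BTRFS_MAP_GET_READ_MIRRORS,
	};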
@@ -1452,7 +1450,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
- ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
+ ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page->recover->map_length,
page->mirror_num, 0);
if (ret)
@@ -1565,6 +1563,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
{
struct scrub_page *page_bad = sblock_bad->pagev[page_num];
struct scrub_page *page_good = sblock_good->pagev[page_num];
+ struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
BUG_ON(page_bad->page == NULL);
BUG_ON(page_good->page == NULL);
@@ -1574,7 +1573,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
int ret;
if (!page_bad->dev->bdev) {
- btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
+ btrfs_warn_rl(fs_info,
"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
return -EIO;
}
@@ -1596,8 +1595,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc(
- &sblock_bad->sctx->dev_root->fs_info->
- dev_replace.num_write_errors);
+ &fs_info->dev_replace.num_write_errors);
bio_put(bio);
return -EIO;
}
@@ -1609,6 +1607,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
+ struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
int page_num;
/*
@@ -1624,8 +1623,7 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
ret = scrub_write_page_to_dev_replace(sblock, page_num);
if (ret)
btrfs_dev_replace_stats_inc(
- &sblock->sctx->dev_root->fs_info->dev_replace.
- num_write_errors);
+ &fs_info->dev_replace.num_write_errors);
}
}
@@ -1740,7 +1738,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
static void scrub_wr_bio_end_io(struct bio *bio)
{
struct scrub_bio *sbio = bio->bi_private;
- struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
sbio->err = bio->bi_error;
sbio->bio = bio;
@@ -1759,7 +1757,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
if (sbio->err) {
struct btrfs_dev_replace *dev_replace =
- &sbio->sctx->dev_root->fs_info->dev_replace;
+ &sbio->sctx->fs_info->dev_replace;
for (i = 0; i < sbio->page_count; i++) {
struct scrub_page *spage = sbio->pagev[i];
@@ -1859,8 +1857,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_header *h;
- struct btrfs_root *root = sctx->dev_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE];
struct page *page;
@@ -2126,7 +2123,7 @@ again:
static void scrub_missing_raid56_end_io(struct bio *bio)
{
struct scrub_block *sblock = bio->bi_private;
- struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
if (bio->bi_error)
sblock->no_io_error_seen = 0;
@@ -2140,6 +2137,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
struct scrub_block *sblock = container_of(work, struct scrub_block, work);
struct scrub_ctx *sctx = sblock->sctx;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 logical;
struct btrfs_device *dev;
@@ -2153,14 +2151,14 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
spin_lock(&sctx->stat_lock);
sctx->stat.read_errors++;
spin_unlock(&sctx->stat_lock);
- btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
+ btrfs_err_rl_in_rcu(fs_info,
"IO error rebuilding logical %llu for dev %s",
logical, rcu_str_deref(dev->name));
} else if (sblock->header_error || sblock->checksum_error) {
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
spin_unlock(&sctx->stat_lock);
- btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
+ btrfs_err_rl_in_rcu(fs_info,
"failed to rebuild valid logical %llu for dev %s",
logical, rcu_str_deref(dev->name));
} else {
@@ -2182,7 +2180,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
struct scrub_ctx *sctx = sblock->sctx;
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 length = sblock->page_count * PAGE_SIZE;
u64 logical = sblock->pagev[0]->logical;
struct btrfs_bio *bbio = NULL;
@@ -2191,8 +2189,8 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
int ret;
int i;
- ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
- &bbio, 0, 1);
+ ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
+ &length, &bbio, 0, 1);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
@@ -2215,7 +2213,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
- rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
+ rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
if (!rbio)
goto rbio_out;
@@ -2334,7 +2332,7 @@ leave_nomem:
static void scrub_bio_end_io(struct bio *bio)
{
struct scrub_bio *sbio = bio->bi_private;
- struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
sbio->err = bio->bi_error;
sbio->bio = bio;
@@ -2391,7 +2389,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
{
u32 offset;
int nsectors;
- int sectorsize = sparity->sctx->dev_root->sectorsize;
+ int sectorsize = sparity->sctx->fs_info->sectorsize;
if (len >= sparity->stripe_len) {
bitmap_set(bitmap, 0, sparity->nsectors);
@@ -2750,6 +2748,7 @@ static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
static void scrub_parity_bio_endio(struct bio *bio)
{
struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
+ struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
if (bio->bi_error)
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
@@ -2759,13 +2758,13 @@ static void scrub_parity_bio_endio(struct bio *bio)
btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
scrub_parity_bio_endio_worker, NULL, NULL);
- btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
- &sparity->work);
+ btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
struct scrub_ctx *sctx = sparity->sctx;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
struct bio *bio;
struct btrfs_raid_bio *rbio;
struct scrub_page *spage;
@@ -2778,8 +2777,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
goto out;
length = sparity->logic_end - sparity->logic_start;
- ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
- sparity->logic_start,
+ ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
&length, &bbio, 0, 1);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
@@ -2792,7 +2790,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
- rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
+ rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
length, sparity->scrub_dev,
sparity->dbitmap,
sparity->nsectors);
@@ -2844,7 +2842,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
u64 logic_start,
u64 logic_end)
{
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
@@ -2866,7 +2864,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
int extent_mirror_num;
int stop_loop = 0;
- nsectors = div_u64(map->stripe_len, root->sectorsize);
+ nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
GFP_NOFS);
@@ -2937,7 +2935,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY)
- bytes = root->nodesize;
+ bytes = fs_info->nodesize;
else
bytes = key.offset;
@@ -2988,8 +2986,9 @@ again:
mapped_length = extent_len;
bbio = NULL;
- ret = btrfs_map_block(fs_info, READ, extent_logical,
- &mapped_length, &bbio, 0);
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
+ extent_logical, &mapped_length, &bbio,
+ 0);
if (!ret) {
if (!bbio || mapped_length < extent_len)
ret = -EIO;
@@ -3068,7 +3067,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
int is_dev_replace)
{
struct btrfs_path *path, *ppath;
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
@@ -3289,7 +3288,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY)
- bytes = root->nodesize;
+ bytes = fs_info->nodesize;
else
bytes = key.offset;
@@ -3442,8 +3441,8 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
struct btrfs_block_group_cache *cache,
int is_dev_replace)
{
- struct btrfs_mapping_tree *map_tree =
- &sctx->dev_root->fs_info->mapping_tree;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
int i;
@@ -3496,8 +3495,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
{
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
- struct btrfs_root *root = sctx->dev_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_root *root = fs_info->dev_root;
u64 length;
u64 chunk_offset;
int ret = 0;
@@ -3617,8 +3616,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (IS_ERR(trans))
ret = PTR_ERR(trans);
else
- ret = btrfs_commit_transaction(trans,
- root);
+ ret = btrfs_commit_transaction(trans);
if (ret) {
scrub_pause_off(fs_info);
btrfs_put_block_group(cache);
@@ -3693,7 +3691,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
if (ro_set)
- btrfs_dec_block_group_ro(root, cache);
+ btrfs_dec_block_group_ro(cache);
/*
* We might have prevented the cleaner kthread from deleting
@@ -3746,16 +3744,16 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
u64 bytenr;
u64 gen;
int ret;
- struct btrfs_root *root = sctx->dev_root;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EIO;
/* Seed devices of a new filesystem has their own generation. */
- if (scrub_dev->fs_devices != root->fs_info->fs_devices)
+ if (scrub_dev->fs_devices != fs_info->fs_devices)
gen = scrub_dev->generation;
else
- gen = root->fs_info->last_trans_committed;
+ gen = fs_info->last_trans_committed;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
@@ -3847,7 +3845,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (btrfs_fs_closing(fs_info))
return -EINVAL;
- if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
+ if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
/*
* in this case scrub is unable to calculate the checksum
* the way scrub is implemented. Do not handle this
@@ -3855,31 +3853,31 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
*/
btrfs_err(fs_info,
"scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
- fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
+ fs_info->nodesize,
+ BTRFS_STRIPE_LEN);
return -EINVAL;
}
- if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
+ if (fs_info->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
btrfs_err_rl(fs_info,
"scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
- fs_info->chunk_root->sectorsize, PAGE_SIZE);
+ fs_info->sectorsize, PAGE_SIZE);
return -EINVAL;
}
- if (fs_info->chunk_root->nodesize >
+ if (fs_info->nodesize >
PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
- fs_info->chunk_root->sectorsize >
- PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
+ fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
/*
* would exhaust the array bounds of pagev member in
* struct scrub_block
*/
btrfs_err(fs_info,
"scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
- fs_info->chunk_root->nodesize,
+ fs_info->nodesize,
SCRUB_MAX_PAGES_PER_BLOCK,
- fs_info->chunk_root->sectorsize,
+ fs_info->sectorsize,
SCRUB_MAX_PAGES_PER_BLOCK);
return -EINVAL;
}
@@ -3979,10 +3977,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return ret;
}
-void btrfs_scrub_pause(struct btrfs_root *root)
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
mutex_lock(&fs_info->scrub_lock);
atomic_inc(&fs_info->scrub_pause_req);
while (atomic_read(&fs_info->scrubs_paused) !=
@@ -3996,10 +3992,8 @@ void btrfs_scrub_pause(struct btrfs_root *root)
mutex_unlock(&fs_info->scrub_lock);
}
-void btrfs_scrub_continue(struct btrfs_root *root)
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
atomic_dec(&fs_info->scrub_pause_req);
wake_up(&fs_info->scrub_pause_wait);
}
@@ -4048,19 +4042,19 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress)
{
struct btrfs_device *dev;
struct scrub_ctx *sctx = NULL;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (dev)
sctx = dev->scrub_device;
if (sctx)
memcpy(progress, &sctx->stat, sizeof(*progress));
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
@@ -4076,7 +4070,7 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
int ret;
mapped_length = extent_len;
- ret = btrfs_map_block(fs_info, READ, extent_logical,
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
&mapped_length, &bbio, 0);
if (ret || !bbio || mapped_length < extent_len ||
!bbio->stripes[0].dev->bdev) {
@@ -4122,7 +4116,7 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
int mirror_num, u64 physical_for_dev_replace)
{
struct scrub_copy_nocow_ctx *nocow_ctx;
- struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
if (!nocow_ctx) {
@@ -4170,20 +4164,17 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
struct scrub_copy_nocow_ctx *nocow_ctx =
container_of(work, struct scrub_copy_nocow_ctx, work);
struct scrub_ctx *sctx = nocow_ctx->sctx;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_root *root = fs_info->extent_root;
u64 logical = nocow_ctx->logical;
u64 len = nocow_ctx->len;
int mirror_num = nocow_ctx->mirror_num;
u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
int ret;
struct btrfs_trans_handle *trans = NULL;
- struct btrfs_fs_info *fs_info;
struct btrfs_path *path;
- struct btrfs_root *root;
int not_written = 0;
- fs_info = sctx->dev_root->fs_info;
- root = fs_info->extent_root;
-
path = btrfs_alloc_path();
if (!path) {
spin_lock(&sctx->stat_lock);
@@ -4210,7 +4201,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
goto out;
}
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
trans = NULL;
while (!list_empty(&nocow_ctx->inodes)) {
struct scrub_nocow_inode *entry;
@@ -4238,7 +4229,7 @@ out:
kfree(entry);
}
if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
if (not_written)
btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
num_uncorrectable_read_errors);
@@ -4296,7 +4287,7 @@ out_unlock:
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
struct scrub_copy_nocow_ctx *nocow_ctx)
{
- struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
struct btrfs_key key;
struct inode *inode;
struct page *page;
@@ -4426,7 +4417,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
if (!dev)
return -EIO;
if (!dev->bdev) {
- btrfs_warn_rl(dev->dev_root->fs_info,
+ btrfs_warn_rl(dev->fs_info,
"scrub write_page_nocow(bdev == NULL) is unexpected");
return -EIO;
}
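write_page_nocow() and the bio end_io handlers above now reach the fs context as dev->fs_info: struct btrfs_device carries a direct backpointer, replacing the old detour through dev->dev_root->fs_info. In stub form:

	struct btrfs_fs_info;

	struct btrfs_device {
		struct btrfs_fs_info *fs_info;	/* direct backpointer (new) */
		/* dev_root detour no longer used by these callers */
		/* ... */
	};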
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 71261b459863..d145ce804620 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1054,7 +1054,8 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
ret = -ENAMETOOLONG;
goto out;
}
- if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
+ if (name_len + data_len >
+ BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
ret = -E2BIG;
goto out;
}
@@ -1430,9 +1431,9 @@ static int find_extent_clone(struct send_ctx *sctx,
extent_item_pos = logical - found_key.objectid;
else
extent_item_pos = 0;
- ret = iterate_extent_inodes(fs_info,
- found_key.objectid, extent_item_pos, 1,
- __iterate_backrefs, backref_ctx);
+ ret = iterate_extent_inodes(fs_info, found_key.objectid,
+ extent_item_pos, 1, __iterate_backrefs,
+ backref_ctx);
if (ret < 0)
goto out;
@@ -3434,6 +3435,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
+ struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key di_key;
@@ -3462,8 +3464,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
goto out;
}
- di = btrfs_match_dir_item_name(sctx->parent_root, path,
- parent_ref->name, parent_ref->name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
+ parent_ref->name_len);
if (!di) {
ret = 0;
goto out;
@@ -5264,7 +5266,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
u64 size = btrfs_file_extent_inline_len(path->nodes[0],
path->slots[0], fi);
extent_end = ALIGN(key.offset + size,
- sctx->send_root->sectorsize);
+ sctx->send_root->fs_info->sectorsize);
} else {
extent_end = key.offset +
btrfs_file_extent_num_bytes(path->nodes[0], fi);
@@ -5299,7 +5301,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
u64 size = btrfs_file_extent_inline_len(path->nodes[0],
path->slots[0], fi);
extent_end = ALIGN(key->offset + size,
- sctx->send_root->sectorsize);
+ sctx->send_root->fs_info->sectorsize);
} else {
extent_end = key->offset +
btrfs_file_extent_num_bytes(path->nodes[0], fi);
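Inline extents store no extent length on disk, so send computes the logical end by rounding the inline size up to a sector boundary — now via fs_info->sectorsize. ALIGN() is the usual power-of-two round-up; a self-contained check of the arithmetic:

	#include <stdio.h>

	/* Userspace rendition of the kernel's ALIGN() for power-of-two a. */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

	int main(void)
	{
		unsigned long long key_offset = 0, size = 300;
		unsigned long long sectorsize = 4096;
		unsigned long long extent_end = ALIGN(key_offset + size,
						      sectorsize);

		printf("%llu\n", extent_end);	/* 4096: 300 bytes round up */
		return 0;
	}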
@@ -6110,7 +6112,7 @@ again:
goto commit_trans;
if (trans)
- return btrfs_end_transaction(trans, sctx->send_root);
+ return btrfs_end_transaction(trans);
return 0;
@@ -6123,7 +6125,7 @@ commit_trans:
goto again;
}
- return btrfs_commit_transaction(trans, sctx->send_root);
+ return btrfs_commit_transaction(trans);
}
static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
@@ -6136,17 +6138,17 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
*/
if (root->send_in_progress < 0)
btrfs_err(root->fs_info,
- "send_in_progres unbalanced %d root %llu",
- root->send_in_progress, root->root_key.objectid);
+ "send_in_progres unbalanced %d root %llu",
+ root->send_in_progress, root->root_key.objectid);
spin_unlock(&root->root_item_lock);
}
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
int ret = 0;
- struct btrfs_root *send_root;
+ struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
+ struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
- struct btrfs_fs_info *fs_info;
struct btrfs_ioctl_send_args *arg = NULL;
struct btrfs_key key;
struct send_ctx *sctx = NULL;
@@ -6160,9 +6162,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- send_root = BTRFS_I(file_inode(mnt_file))->root;
- fs_info = send_root->fs_info;
-
/*
* The subvolume must remain read-only during send, protect against
* making it RW. This also protects against deletion.
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3b713b6fcc26..b5ae7d3d1896 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -303,7 +303,7 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
static void btrfs_put_super(struct super_block *sb)
{
- close_ctree(btrfs_sb(sb)->tree_root);
+ close_ctree(btrfs_sb(sb));
}
enum {
@@ -394,10 +394,9 @@ static const match_table_t tokens = {
* reading in a new superblock is parsed here.
* XXX JDM: This needs to be cleaned up for remount.
*/
-int btrfs_parse_options(struct btrfs_root *root, char *options,
+int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags)
{
- struct btrfs_fs_info *info = root->fs_info;
substring_t args[MAX_OPT_ARGS];
char *p, *num, *orig = NULL;
u64 cache_gen;
@@ -409,8 +408,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
bool saved_compress_force;
int no_compress = 0;
- cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
- if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE))
+ cache_gen = btrfs_super_cache_generation(info->super_copy);
+ if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
else if (cache_gen)
btrfs_set_opt(info->mount_opt, SPACE_CACHE);
@@ -440,7 +439,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
token = match_token(p, tokens, args);
switch (token) {
case Opt_degraded:
- btrfs_info(root->fs_info, "allowing degraded mounts");
+ btrfs_info(info, "allowing degraded mounts");
btrfs_set_opt(info->mount_opt, DEGRADED);
break;
case Opt_subvol:
@@ -459,11 +458,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_datasum:
if (btrfs_test_opt(info, NODATASUM)) {
if (btrfs_test_opt(info, NODATACOW))
- btrfs_info(root->fs_info,
+ btrfs_info(info,
"setting datasum, datacow enabled");
else
- btrfs_info(root->fs_info,
- "setting datasum");
+ btrfs_info(info, "setting datasum");
}
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -472,11 +470,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
if (!btrfs_test_opt(info, NODATACOW)) {
if (!btrfs_test_opt(info, COMPRESS) ||
!btrfs_test_opt(info, FORCE_COMPRESS)) {
- btrfs_info(root->fs_info,
+ btrfs_info(info,
"setting nodatacow, compression disabled");
} else {
- btrfs_info(root->fs_info,
- "setting nodatacow");
+ btrfs_info(info, "setting nodatacow");
}
}
btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -543,8 +540,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
compress_force != saved_compress_force)) ||
(!btrfs_test_opt(info, COMPRESS) &&
no_compress == 1)) {
- btrfs_info(root->fs_info,
- "%s %s compression",
+ btrfs_info(info, "%s %s compression",
(compress_force) ? "force" : "use",
compress_type);
}
@@ -592,10 +588,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
if (info->max_inline) {
info->max_inline = min_t(u64,
info->max_inline,
- root->sectorsize);
+ info->sectorsize);
}
- btrfs_info(root->fs_info, "max_inline at %llu",
- info->max_inline);
+ btrfs_info(info, "max_inline at %llu",
+ info->max_inline);
} else {
ret = -ENOMEM;
goto out;
@@ -608,8 +604,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
info->alloc_start = memparse(num, NULL);
mutex_unlock(&info->chunk_mutex);
kfree(num);
- btrfs_info(root->fs_info,
- "allocations start at %llu",
+ btrfs_info(info, "allocations start at %llu",
info->alloc_start);
} else {
ret = -ENOMEM;
@@ -618,16 +613,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
case Opt_acl:
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- root->fs_info->sb->s_flags |= MS_POSIXACL;
+ info->sb->s_flags |= MS_POSIXACL;
break;
#else
- btrfs_err(root->fs_info,
- "support for ACL not compiled in!");
+ btrfs_err(info, "support for ACL not compiled in!");
ret = -EINVAL;
goto out;
#endif
case Opt_noacl:
- root->fs_info->sb->s_flags &= ~MS_POSIXACL;
+ info->sb->s_flags &= ~MS_POSIXACL;
break;
case Opt_notreelog:
btrfs_set_and_info(info, NOTREELOG,
@@ -656,8 +650,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out;
} else if (intarg >= 0) {
info->metadata_ratio = intarg;
- btrfs_info(root->fs_info, "metadata ratio %d",
- info->metadata_ratio);
+ btrfs_info(info, "metadata ratio %d",
+ info->metadata_ratio);
} else {
ret = -EINVAL;
goto out;
@@ -675,15 +669,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_space_cache_version:
if (token == Opt_space_cache ||
strcmp(args[0].from, "v1") == 0) {
- btrfs_clear_opt(root->fs_info->mount_opt,
+ btrfs_clear_opt(info->mount_opt,
FREE_SPACE_TREE);
btrfs_set_and_info(info, SPACE_CACHE,
- "enabling disk space caching");
+ "enabling disk space caching");
} else if (strcmp(args[0].from, "v2") == 0) {
- btrfs_clear_opt(root->fs_info->mount_opt,
+ btrfs_clear_opt(info->mount_opt,
SPACE_CACHE);
- btrfs_set_and_info(info,
- FREE_SPACE_TREE,
+ btrfs_set_and_info(info, FREE_SPACE_TREE,
"enabling free space tree");
} else {
ret = -EINVAL;
@@ -695,14 +688,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
case Opt_no_space_cache:
if (btrfs_test_opt(info, SPACE_CACHE)) {
- btrfs_clear_and_info(info,
- SPACE_CACHE,
- "disabling disk space caching");
+ btrfs_clear_and_info(info, SPACE_CACHE,
+ "disabling disk space caching");
}
if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
- btrfs_clear_and_info(info,
- FREE_SPACE_TREE,
- "disabling free space tree");
+ btrfs_clear_and_info(info, FREE_SPACE_TREE,
+ "disabling free space tree");
}
break;
case Opt_inode_cache:
@@ -735,10 +726,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
"disabling auto defrag");
break;
case Opt_recovery:
- btrfs_warn(root->fs_info,
+ btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead");
case Opt_usebackuproot:
- btrfs_info(root->fs_info,
+ btrfs_info(info,
"trying to use backup root at mount time");
btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
break;
@@ -747,14 +738,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
case Opt_check_integrity_including_extent_data:
- btrfs_info(root->fs_info,
+ btrfs_info(info,
"enabling check integrity including extent data");
btrfs_set_opt(info->mount_opt,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity:
- btrfs_info(root->fs_info, "enabling check integrity");
+ btrfs_info(info, "enabling check integrity");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity_print_mask:
@@ -763,7 +754,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out;
} else if (intarg >= 0) {
info->check_integrity_print_mask = intarg;
- btrfs_info(root->fs_info,
+ btrfs_info(info,
"check_integrity_print_mask 0x%x",
info->check_integrity_print_mask);
} else {
@@ -775,8 +766,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_check_integrity_including_extent_data:
case Opt_check_integrity:
case Opt_check_integrity_print_mask:
- btrfs_err(root->fs_info,
- "support for check_integrity* not compiled in!");
+ btrfs_err(info,
+ "support for check_integrity* not compiled in!");
ret = -EINVAL;
goto out;
#endif
@@ -796,20 +787,19 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
intarg = 0;
ret = match_int(&args[0], &intarg);
if (ret < 0) {
- btrfs_err(root->fs_info,
- "invalid commit interval");
+ btrfs_err(info, "invalid commit interval");
ret = -EINVAL;
goto out;
}
if (intarg > 0) {
if (intarg > 300) {
- btrfs_warn(root->fs_info,
+ btrfs_warn(info,
"excessive commit interval %d",
intarg);
}
info->commit_interval = intarg;
} else {
- btrfs_info(root->fs_info,
+ btrfs_info(info,
"using default commit interval %ds",
BTRFS_DEFAULT_COMMIT_INTERVAL);
info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
@@ -817,23 +807,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
#ifdef CONFIG_BTRFS_DEBUG
case Opt_fragment_all:
- btrfs_info(root->fs_info, "fragmenting all space");
+ btrfs_info(info, "fragmenting all space");
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
break;
case Opt_fragment_metadata:
- btrfs_info(root->fs_info, "fragmenting metadata");
+ btrfs_info(info, "fragmenting metadata");
btrfs_set_opt(info->mount_opt,
FRAGMENT_METADATA);
break;
case Opt_fragment_data:
- btrfs_info(root->fs_info, "fragmenting data");
+ btrfs_info(info, "fragmenting data");
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
break;
#endif
case Opt_err:
- btrfs_info(root->fs_info,
- "unrecognized mount option '%s'", p);
+ btrfs_info(info, "unrecognized mount option '%s'", p);
ret = -EINVAL;
goto out;
default:
@@ -845,22 +834,22 @@ check:
* Extra check for current option against current flag
*/
if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
- btrfs_err(root->fs_info,
+ btrfs_err(info,
"nologreplay must be used with ro mount option");
ret = -EINVAL;
}
out:
- if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
+ if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, CLEAR_CACHE)) {
- btrfs_err(root->fs_info, "cannot disable free space tree");
+ btrfs_err(info, "cannot disable free space tree");
ret = -EINVAL;
}
if (!ret && btrfs_test_opt(info, SPACE_CACHE))
- btrfs_info(root->fs_info, "disk space caching is enabled");
+ btrfs_info(info, "disk space caching is enabled");
if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
- btrfs_info(root->fs_info, "using free space tree");
+ btrfs_info(info, "using free space tree");
kfree(orig);
return ret;
}
@@ -1171,7 +1160,7 @@ static int btrfs_fill_super(struct super_block *sb,
return 0;
fail_close:
- close_ctree(fs_info->tree_root);
+ close_ctree(fs_info);
return err;
}
@@ -1215,13 +1204,12 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
if (IS_ERR(trans))
return PTR_ERR(trans);
}
- return btrfs_commit_transaction(trans, root);
+ return btrfs_commit_transaction(trans);
}
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
- struct btrfs_root *root = info->tree_root;
char *compress_type;
if (btrfs_test_opt(info, DEGRADED))
@@ -1263,7 +1251,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard");
- if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
+ if (!(info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache");
@@ -1742,7 +1730,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
}
- ret = btrfs_parse_options(root, data, *flags);
+ ret = btrfs_parse_options(fs_info, data, *flags);
if (ret) {
ret = -EINVAL;
goto restore;
@@ -1782,11 +1770,11 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_scrub_cancel(fs_info);
btrfs_pause_balance(fs_info);
- ret = btrfs_commit_super(root);
+ ret = btrfs_commit_super(fs_info);
if (ret)
goto restore;
} else {
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_err(fs_info,
"Remounting read-write after error is not allowed");
ret = -EINVAL;
@@ -1899,9 +1887,10 @@ static inline void btrfs_descending_sort_devices(
* Helper to calculate the free space on the devices that can be used to
* store file data.
*/
-static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
+static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+ u64 *free_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_device_info *devices_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
@@ -2084,10 +2073,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 thresh = 0;
int mixed = 0;
- /*
- * holding chunk_mutex to avoid allocating new chunks, holding
- * device_list_mutex to avoid the device being removed
- */
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@@ -2139,7 +2124,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
spin_unlock(&block_rsv->lock);
buf->f_bavail = div_u64(total_free_data, factor);
- ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
+ ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
if (ret)
return ret;
buf->f_bavail += div_u64(total_free_data, factor);
@@ -2247,9 +2232,10 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
static int btrfs_freeze(struct super_block *sb)
{
struct btrfs_trans_handle *trans;
- struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_root *root = fs_info->tree_root;
- root->fs_info->fs_frozen = 1;
+ fs_info->fs_frozen = 1;
/*
* We don't need a barrier here, we'll wait for any transaction that
* could be in progress on other threads (and do delayed iputs that
@@ -2263,14 +2249,12 @@ static int btrfs_freeze(struct super_block *sb)
return 0;
return PTR_ERR(trans);
}
- return btrfs_commit_transaction(trans, root);
+ return btrfs_commit_transaction(trans);
}
static int btrfs_unfreeze(struct super_block *sb)
{
- struct btrfs_root *root = btrfs_sb(sb)->tree_root;
-
- root->fs_info->fs_frozen = 0;
+ btrfs_sb(sb)->fs_frozen = 0;
return 0;
}
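
Every super.c hunk above is an instance of the same mechanical conversion: a function that used root only as a vehicle for root->fs_info now takes the fs_info directly, and entry points that start from a super_block resolve it once via btrfs_sb(). A condensed before/after sketch (the demo_* names are hypothetical):

/* Before: root is dead weight; only root->fs_info is ever used. */
static int demo_old(struct btrfs_root *root)
{
	btrfs_info(root->fs_info, "doing work");
	return 0;
}

/* After: take the fs_info directly. */
static int demo_new(struct btrfs_fs_info *fs_info)
{
	btrfs_info(fs_info, "doing work");
	return 0;
}

/* Callers holding a super_block resolve the fs_info exactly once. */
static int demo_caller(struct super_block *sb)
{
	return demo_new(btrfs_sb(sb));
}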
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 00ee006a8aa2..ea272432c930 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -79,7 +79,7 @@ static void btrfs_destroy_test_fs(void)
unregister_filesystem(&test_type);
}
-struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
+struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
GFP_KERNEL);
@@ -100,6 +100,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
return NULL;
}
+ fs_info->nodesize = nodesize;
+ fs_info->sectorsize = sectorsize;
+
if (init_srcu_struct(&fs_info->subvol_srcu)) {
kfree(fs_info->fs_devices);
kfree(fs_info->super_copy);
@@ -190,7 +193,8 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
}
struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
+btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
+ unsigned long length)
{
struct btrfs_block_group_cache *cache;
@@ -207,8 +211,9 @@ btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
cache->key.objectid = 0;
cache->key.offset = length;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- cache->sectorsize = sectorsize;
- cache->full_stripe_len = sectorsize;
+ cache->sectorsize = fs_info->sectorsize;
+ cache->full_stripe_len = fs_info->sectorsize;
+ cache->fs_info = fs_info;
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
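
With the sizes stored in the dummy fs_info, test setup follows a fixed order: allocate the fs_info first, then derive block groups from it. A sketch built only from the helpers changed in this file (demo_setup is hypothetical and SZ_1M merely stands in for a real length):

static int demo_setup(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_block_group_cache *cache;

	/* nodesize/sectorsize travel with the fs_info from here on */
	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info)
		return -ENOMEM;

	/* sectorsize, full_stripe_len and cache->fs_info are filled in */
	cache = btrfs_alloc_dummy_block_group(fs_info, SZ_1M);
	if (!cache) {
		btrfs_free_dummy_fs_info(fs_info);
		return -ENOMEM;
	}

	btrfs_free_dummy_block_group(cache);
	btrfs_free_dummy_fs_info(fs_info);
	return 0;
}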
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index b17ffbe8f9f3..266f1e3d1784 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -34,11 +34,11 @@ int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
struct inode *btrfs_new_test_inode(void);
-struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
+struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
+btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index 199569174637..b9142c614114 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -41,13 +41,13 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
test_msg("Running btrfs_split_item tests\n");
- fs_info = btrfs_alloc_dummy_fs_info();
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Could not allocate fs_info\n");
return -ENOMEM;
}
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
ret = PTR_ERR(root);
@@ -61,8 +61,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
- path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
- nodesize);
+ path->nodes[0] = eb = alloc_dummy_extent_buffer(fs_info, nodesize);
if (!eb) {
test_msg("Could not allocate dummy buffer\n");
ret = -ENOMEM;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index caad80bb9bd0..133753232a94 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -306,7 +306,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
int ret;
memset(bitmap, 0, len);
- memset_extent_buffer(eb, 0, 0, len);
+ memzero_extent_buffer(eb, 0, len);
if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
test_msg("Bitmap was not zeroed\n");
return -EINVAL;
@@ -383,6 +383,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info;
unsigned long len;
unsigned long *bitmap;
struct extent_buffer *eb;
@@ -397,13 +398,15 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
? sectorsize * 4 : sectorsize;
+ fs_info = btrfs_alloc_dummy_fs_info(len, len);
+
bitmap = kmalloc(len, GFP_KERNEL);
if (!bitmap) {
test_msg("Couldn't allocate test bitmap\n");
return -ENOMEM;
}
- eb = __alloc_dummy_extent_buffer(NULL, 0, len);
+ eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
if (!eb) {
test_msg("Couldn't allocate test extent buffer\n");
kfree(bitmap);
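
The functional change in this file is the switch from the generic fill helper to a dedicated zeroing one. The conversion at a call site, shown in isolation (demo_zero_eb is hypothetical; both helpers appear in the hunk above):

static void demo_zero_eb(struct extent_buffer *eb, unsigned long len)
{
	/* was: memset_extent_buffer(eb, 0, 0, len); */
	memzero_extent_buffer(eb, 0, len);
}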
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 3221c8dee272..eca6412d42bd 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -843,33 +843,31 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
int ret = -ENOMEM;
test_msg("Running btrfs free space cache tests\n");
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info)
+ return -ENOMEM;
+
/*
* For ppc64 (with 64k page size), bytes per bitmap might be
* larger than 1G. To make the bitmap test available on ppc64,
* allocate a dummy block group whose size crosses bitmap boundaries.
*/
- cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
- + PAGE_SIZE, sectorsize);
+ cache = btrfs_alloc_dummy_block_group(fs_info,
+ BITS_PER_BITMAP * sectorsize + PAGE_SIZE);
if (!cache) {
test_msg("Couldn't run the tests\n");
+ btrfs_free_dummy_fs_info(fs_info);
return 0;
}
- fs_info = btrfs_alloc_dummy_fs_info();
- if (!fs_info) {
- ret = -ENOMEM;
- goto out;
- }
-
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out;
}
root->fs_info->extent_root = root;
- cache->fs_info = root->fs_info;
ret = test_extents(cache);
if (ret)
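
Reordering the allocations also changes the error discipline: the fs_info exists before the block group, so a cache-allocation failure must free it explicitly rather than fall through to a later label. The remaining manual step is wiring the dummy root in as the extent root, sketched here on its own (the demo name is hypothetical; teardown is omitted for brevity):

static int demo_wire_extent_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_alloc_dummy_root(fs_info);
	if (IS_ERR(root))
		return PTR_ERR(root);

	/* the free-space tests point the extent root at the dummy root */
	fs_info->extent_root = root;
	return 0;
}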
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 6e144048a72e..b29954c01673 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -455,14 +455,14 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
struct btrfs_path *path = NULL;
int ret;
- fs_info = btrfs_alloc_dummy_fs_info();
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
ret = -ENOMEM;
goto out;
}
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate dummy root\n");
ret = PTR_ERR(root);
@@ -474,8 +474,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
root->fs_info->free_space_root = root;
root->fs_info->tree_root = root;
- root->node = alloc_test_extent_buffer(root->fs_info,
- nodesize, nodesize);
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
@@ -485,7 +484,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
- cache = btrfs_alloc_dummy_block_group(8 * alignment, sectorsize);
+ cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment);
if (!cache) {
test_msg("Couldn't allocate dummy block group cache\n");
ret = -ENOMEM;
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 0bf46808ce8f..4d0f038e14f1 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -249,19 +249,19 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- fs_info = btrfs_alloc_dummy_fs_info();
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
}
- root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
+ root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
@@ -854,19 +854,19 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- fs_info = btrfs_alloc_dummy_fs_info();
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
}
- root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
+ root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
@@ -950,13 +950,13 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
return ret;
}
- fs_info = btrfs_alloc_dummy_fs_info();
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ca7cb5e6d385..0f4ce970d195 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -458,13 +458,13 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
struct btrfs_root *tmp_root;
int ret = 0;
- fs_info = btrfs_alloc_dummy_fs_info();
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
return -ENOMEM;
}
- root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
ret = PTR_ERR(root);
@@ -486,8 +486,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
* Can't use bytenr 0, some things freak out
* *cough*backref walking code*cough*
*/
- root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
- nodesize);
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
@@ -497,7 +496,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
- tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ tmp_root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
@@ -512,7 +511,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
goto out;
}
- tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ tmp_root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
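
Every test file above repeats the same signature change: btrfs_alloc_dummy_root() loses its sectorsize/nodesize arguments because both now live in the fs_info. The recurring shape, condensed into one hypothetical helper:

static struct btrfs_root *demo_root(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info)
		return ERR_PTR(-ENOMEM);

	/* the sizes are implied by fs_info; no extra arguments */
	root = btrfs_alloc_dummy_root(fs_info);
	if (IS_ERR(root))
		btrfs_free_dummy_fs_info(fs_info);
	return root;
}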
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9517de0e668c..0e0508f488b2 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -184,10 +184,10 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
/*
* either allocate a new transaction or hop into the existing one
*/
-static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
+static noinline int join_transaction(struct btrfs_fs_info *fs_info,
+ unsigned int type)
{
struct btrfs_transaction *cur_trans;
- struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
loop:
@@ -314,9 +314,11 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int force)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
root->last_trans < trans->transid) || force) {
- WARN_ON(root == root->fs_info->extent_root);
+ WARN_ON(root == fs_info->extent_root);
WARN_ON(root->commit_root != root->node);
/*
@@ -331,15 +333,15 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
*/
smp_wmb();
- spin_lock(&root->fs_info->fs_roots_radix_lock);
+ spin_lock(&fs_info->fs_roots_radix_lock);
if (root->last_trans == trans->transid && !force) {
- spin_unlock(&root->fs_info->fs_roots_radix_lock);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
return 0;
}
- radix_tree_tag_set(&root->fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
- BTRFS_ROOT_TRANS_TAG);
- spin_unlock(&root->fs_info->fs_roots_radix_lock);
+ radix_tree_tag_set(&fs_info->fs_roots_radix,
+ (unsigned long)root->root_key.objectid,
+ BTRFS_ROOT_TRANS_TAG);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
root->last_trans = trans->transid;
/* this is pretty tricky. We don't want to
@@ -372,6 +374,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
/* Add ourselves to the transaction dropped list */
@@ -380,16 +383,18 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
spin_unlock(&cur_trans->dropped_roots_lock);
/* Make sure we don't try to update the root at commit time */
- spin_lock(&root->fs_info->fs_roots_radix_lock);
- radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
- spin_unlock(&root->fs_info->fs_roots_radix_lock);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
return 0;
@@ -402,9 +407,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
!test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
return 0;
- mutex_lock(&root->fs_info->reloc_mutex);
+ mutex_lock(&fs_info->reloc_mutex);
record_root_in_trans(trans, root, 0);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
return 0;
}
@@ -420,35 +425,35 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans)
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.
*/
-static void wait_current_trans(struct btrfs_root *root)
+static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
struct btrfs_transaction *cur_trans;
- spin_lock(&root->fs_info->trans_lock);
- cur_trans = root->fs_info->running_transaction;
+ spin_lock(&fs_info->trans_lock);
+ cur_trans = fs_info->running_transaction;
if (cur_trans && is_transaction_blocked(cur_trans)) {
atomic_inc(&cur_trans->use_count);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
- wait_event(root->fs_info->transaction_wait,
+ wait_event(fs_info->transaction_wait,
cur_trans->state >= TRANS_STATE_UNBLOCKED ||
cur_trans->aborted);
btrfs_put_transaction(cur_trans);
} else {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
}
}
-static int may_wait_transaction(struct btrfs_root *root, int type)
+static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
- if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return 0;
if (type == TRANS_USERSPACE)
return 1;
if (type == TRANS_START &&
- !atomic_read(&root->fs_info->open_ioctl_trans))
+ !atomic_read(&fs_info->open_ioctl_trans))
return 1;
return 0;
@@ -456,7 +461,9 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
- if (!root->fs_info->reloc_ctl ||
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ if (!fs_info->reloc_ctl ||
!test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
root->reloc_root)
@@ -469,6 +476,8 @@ static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
unsigned int type, enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
@@ -479,7 +488,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
/* Send isn't supposed to start transactions. */
ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return ERR_PTR(-EROFS);
if (current->journal_info) {
@@ -496,23 +505,22 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* Do the reservation before we join the transaction so we can do all
* the appropriate flushing if need be.
*/
- if (num_items > 0 && root != root->fs_info->chunk_root) {
- qgroup_reserved = num_items * root->nodesize;
+ if (num_items > 0 && root != fs_info->chunk_root) {
+ qgroup_reserved = num_items * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved);
if (ret)
return ERR_PTR(ret);
- num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
/*
* Do the reservation for the relocation root creation
*/
if (need_reserve_reloc_root(root)) {
- num_bytes += root->nodesize;
+ num_bytes += fs_info->nodesize;
reloc_reserved = true;
}
- ret = btrfs_block_rsv_add(root,
- &root->fs_info->trans_block_rsv,
+ ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
num_bytes, flush);
if (ret)
goto reserve_fail;
@@ -535,15 +543,15 @@ again:
* transaction and commit it, so we needn't do sb_start_intwrite().
*/
if (type & __TRANS_FREEZABLE)
- sb_start_intwrite(root->fs_info->sb);
+ sb_start_intwrite(fs_info->sb);
- if (may_wait_transaction(root, type))
- wait_current_trans(root);
+ if (may_wait_transaction(fs_info, type))
+ wait_current_trans(fs_info);
do {
- ret = join_transaction(root, type);
+ ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
- wait_current_trans(root);
+ wait_current_trans(fs_info);
if (unlikely(type == TRANS_ATTACH))
ret = -ENOENT;
}
@@ -552,7 +560,7 @@ again:
if (ret < 0)
goto join_fail;
- cur_trans = root->fs_info->running_transaction;
+ cur_trans = fs_info->running_transaction;
h->transid = cur_trans->transid;
h->transaction = cur_trans;
@@ -567,16 +575,16 @@ again:
smp_mb();
if (cur_trans->state >= TRANS_STATE_BLOCKED &&
- may_wait_transaction(root, type)) {
+ may_wait_transaction(fs_info, type)) {
current->journal_info = h;
- btrfs_commit_transaction(h, root);
+ btrfs_commit_transaction(h);
goto again;
}
if (num_bytes) {
- trace_btrfs_space_reservation(root->fs_info, "transaction",
+ trace_btrfs_space_reservation(fs_info, "transaction",
h->transid, num_bytes, 1);
- h->block_rsv = &root->fs_info->trans_block_rsv;
+ h->block_rsv = &fs_info->trans_block_rsv;
h->bytes_reserved = num_bytes;
h->reloc_reserved = reloc_reserved;
}
@@ -590,11 +598,11 @@ got_it:
join_fail:
if (type & __TRANS_FREEZABLE)
- sb_end_intwrite(root->fs_info->sb);
+ sb_end_intwrite(fs_info->sb);
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
if (num_bytes)
- btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+ btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
num_bytes);
reserve_fail:
btrfs_qgroup_free_meta(root, qgroup_reserved);
@@ -612,6 +620,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
unsigned int num_items,
int min_factor)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
u64 num_bytes;
int ret;
@@ -624,19 +633,17 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
if (IS_ERR(trans))
return trans;
- num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
- ret = btrfs_cond_migrate_bytes(root->fs_info,
- &root->fs_info->trans_block_rsv,
- num_bytes,
- min_factor);
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
+ ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
+ num_bytes, min_factor);
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ERR_PTR(ret);
}
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
trans->bytes_reserved = num_bytes;
- trace_btrfs_space_reservation(root->fs_info, "transaction",
+ trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, num_bytes, 1);
return trans;
@@ -702,30 +709,29 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH);
if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
- btrfs_wait_for_commit(root, 0);
+ btrfs_wait_for_commit(root->fs_info, 0);
return trans;
}
/* wait for a transaction commit to be fully complete */
-static noinline void wait_for_commit(struct btrfs_root *root,
- struct btrfs_transaction *commit)
+static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}
-int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
struct btrfs_transaction *cur_trans = NULL, *t;
int ret = 0;
if (transid) {
- if (transid <= root->fs_info->last_trans_committed)
+ if (transid <= fs_info->last_trans_committed)
goto out;
/* find specified transaction */
- spin_lock(&root->fs_info->trans_lock);
- list_for_each_entry(t, &root->fs_info->trans_list, list) {
+ spin_lock(&fs_info->trans_lock);
+ list_for_each_entry(t, &fs_info->trans_list, list) {
if (t->transid == transid) {
cur_trans = t;
atomic_inc(&cur_trans->use_count);
@@ -737,21 +743,21 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
break;
}
}
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
/*
* The specified transaction doesn't exist, or we
* raced with btrfs_commit_transaction
*/
if (!cur_trans) {
- if (transid > root->fs_info->last_trans_committed)
+ if (transid > fs_info->last_trans_committed)
ret = -EINVAL;
goto out;
}
} else {
/* find newest transaction that is committing | committed */
- spin_lock(&root->fs_info->trans_lock);
- list_for_each_entry_reverse(t, &root->fs_info->trans_list,
+ spin_lock(&fs_info->trans_lock);
+ list_for_each_entry_reverse(t, &fs_info->trans_list,
list) {
if (t->state >= TRANS_STATE_COMMIT_START) {
if (t->state == TRANS_STATE_COMPLETED)
@@ -761,37 +767,38 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
break;
}
}
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
if (!cur_trans)
goto out; /* nothing committing|committed */
}
- wait_for_commit(root, cur_trans);
+ wait_for_commit(cur_trans);
btrfs_put_transaction(cur_trans);
out:
return ret;
}
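
After the conversion, btrfs_wait_for_commit() is addressed by fs_info plus a transid, and passing 0 waits for whichever commit is already in flight, exactly as the transid == 0 branch above implements. A hypothetical caller:

static int demo_wait_latest(struct btrfs_fs_info *fs_info)
{
	/* transid == 0: wait for the newest committing transaction */
	return btrfs_wait_for_commit(fs_info, 0);
}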
-void btrfs_throttle(struct btrfs_root *root)
+void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
- if (!atomic_read(&root->fs_info->open_ioctl_trans))
- wait_current_trans(root);
+ if (!atomic_read(&fs_info->open_ioctl_trans))
+ wait_current_trans(fs_info);
}
-static int should_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static int should_end_transaction(struct btrfs_trans_handle *trans)
{
- if (root->fs_info->global_block_rsv.space_info->full &&
- btrfs_check_space_for_delayed_refs(trans, root))
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+
+ if (fs_info->global_block_rsv.space_info->full &&
+ btrfs_check_space_for_delayed_refs(trans, fs_info))
return 1;
- return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
+ return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_transaction *cur_trans = trans->transaction;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int updates;
int err;
@@ -803,19 +810,19 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
- err = btrfs_run_delayed_refs(trans, root, updates * 2);
+ err = btrfs_run_delayed_refs(trans, fs_info, updates * 2);
if (err) /* Error code will also eval true */
return err;
}
- return should_end_transaction(trans, root);
+ return should_end_transaction(trans);
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int throttle)
+ int throttle)
{
+ struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- struct btrfs_fs_info *info = root->fs_info;
u64 transid = trans->transid;
unsigned long cur = trans->delayed_ref_updates;
int lock = (trans->type != TRANS_JOIN_NOLOCK);
@@ -828,16 +835,16 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
return 0;
}
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, info);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, info);
trans->delayed_ref_updates = 0;
if (!trans->sync) {
must_run_delayed_refs =
- btrfs_should_throttle_delayed_refs(trans, root);
+ btrfs_should_throttle_delayed_refs(trans, info);
cur = max_t(unsigned long, cur, 32);
/*
@@ -849,16 +856,16 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
must_run_delayed_refs = 2;
}
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, info);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, info);
btrfs_trans_release_chunk_metadata(trans);
- if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
- should_end_transaction(trans, root) &&
+ if (lock && !atomic_read(&info->open_ioctl_trans) &&
+ should_end_transaction(trans) &&
ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
spin_lock(&info->trans_lock);
if (cur_trans->state == TRANS_STATE_RUNNING)
@@ -868,13 +875,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
if (throttle)
- return btrfs_commit_transaction(trans, root);
+ return btrfs_commit_transaction(trans);
else
wake_up_process(info->transaction_kthread);
}
if (trans->type & __TRANS_FREEZABLE)
- sb_end_intwrite(root->fs_info->sb);
+ sb_end_intwrite(info->sb);
WARN_ON(cur_trans != info->running_transaction);
WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
@@ -893,10 +900,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
current->journal_info = NULL;
if (throttle)
- btrfs_run_delayed_iputs(root);
+ btrfs_run_delayed_iputs(info);
if (trans->aborted ||
- test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+ test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
wake_up_process(info->transaction_kthread);
err = -EIO;
}
@@ -904,22 +911,20 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (must_run_delayed_refs) {
- btrfs_async_run_delayed_refs(root, cur, transid,
+ btrfs_async_run_delayed_refs(info, cur, transid,
must_run_delayed_refs == 1);
}
return err;
}
-int btrfs_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
- return __btrfs_end_transaction(trans, root, 0);
+ return __btrfs_end_transaction(trans, 0);
}
-int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
- return __btrfs_end_transaction(trans, root, 1);
+ return __btrfs_end_transaction(trans, 1);
}
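
Dropping the root argument is possible because every handle already records its filesystem: __btrfs_end_transaction() reads trans->fs_info. For callers the conversion is purely textual, as this hypothetical wrapper shows:

static int demo_finish(struct btrfs_trans_handle *trans)
{
	/* the handle carries its own fs_info; no root needed */
	return btrfs_end_transaction(trans);
}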
/*
@@ -927,12 +932,12 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
* them in one of two extent_io trees. This is used to make sure all of
* those extents are sent to disk but does not wait on them
*/
-int btrfs_write_marked_extents(struct btrfs_root *root,
+int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark)
{
int err = 0;
int werr = 0;
- struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+ struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
@@ -949,11 +954,11 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
* time a temporary error. So when it happens, ignore the error
* and wait for writeback of this range to finish - because we
* failed to set the bit EXTENT_NEED_WAIT for the range, a call
- * to btrfs_wait_marked_extents() would not know that writeback
- * for this range started and therefore wouldn't wait for it to
- * finish - we don't want to commit a superblock that points to
- * btree nodes/leafs for which writeback hasn't finished yet
- * (and without errors).
+ * to __btrfs_wait_marked_extents() would not know that
+ * writeback for this range started and therefore wouldn't
+ * wait for it to finish - we don't want to commit a
+ * superblock that points to btree nodes/leafs for which
+ * writeback hasn't finished yet (and without errors).
* We cleanup any entries left in the io tree when committing
* the transaction (through clear_btree_io_tree()).
*/
@@ -981,16 +986,15 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
* those extents are on disk for transaction or log commit. We wait
* on all the pages and clear them from the dirty pages state tree
*/
-int btrfs_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages, int mark)
+static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *dirty_pages)
{
int err = 0;
int werr = 0;
- struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+ struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
- bool errors = false;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
EXTENT_NEED_WAIT, &cached_state)) {
@@ -1018,27 +1022,45 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
}
if (err)
werr = err;
+ return werr;
+}
- if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
- if ((mark & EXTENT_DIRTY) &&
- test_and_clear_bit(BTRFS_FS_LOG1_ERR,
- &root->fs_info->flags))
- errors = true;
+int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *dirty_pages)
+{
+ bool errors = false;
+ int err;
- if ((mark & EXTENT_NEW) &&
- test_and_clear_bit(BTRFS_FS_LOG2_ERR,
- &root->fs_info->flags))
- errors = true;
- } else {
- if (test_and_clear_bit(BTRFS_FS_BTREE_ERR,
- &root->fs_info->flags))
- errors = true;
- }
+ err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+ if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
+ errors = true;
- if (errors && !werr)
- werr = -EIO;
+ if (errors && !err)
+ err = -EIO;
+ return err;
+}
- return werr;
+int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
+{
+ struct btrfs_fs_info *fs_info = log_root->fs_info;
+ struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
+ bool errors = false;
+ int err;
+
+ ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+
+ err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+ if ((mark & EXTENT_DIRTY) &&
+ test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
+ errors = true;
+
+ if ((mark & EXTENT_NEW) &&
+ test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
+ errors = true;
+
+ if (errors && !err)
+ err = -EIO;
+ return err;
}
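
Splitting the old btrfs_wait_marked_extents() lets each caller state its intent: btrfs_wait_extents() covers ordinary btree ranges, while btrfs_wait_tree_log_extents() handles log roots and folds in the LOG1/LOG2 error bits selected by mark. Hypothetical call sites for both halves:

/* ordinary transaction commit path */
static int demo_wait_btree(struct btrfs_fs_info *fs_info,
			   struct extent_io_tree *dirty_pages)
{
	return btrfs_wait_extents(fs_info, dirty_pages);
}

/* tree-log commit path; mark selects the EXTENT_DIRTY/EXTENT_NEW bits */
static int demo_wait_log(struct btrfs_root *log_root, int mark)
{
	return btrfs_wait_tree_log_extents(log_root, mark);
}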
/*
@@ -1046,7 +1068,7 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
* them in one of two extent_io trees. This is used to make sure all of
* those extents are on disk for transaction or log commit
*/
-static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
+static int btrfs_write_and_wait_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark)
{
int ret;
@@ -1054,9 +1076,9 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
struct blk_plug plug;
blk_start_plug(&plug);
- ret = btrfs_write_marked_extents(root, dirty_pages, mark);
+ ret = btrfs_write_marked_extents(fs_info, dirty_pages, mark);
blk_finish_plug(&plug);
- ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
+ ret2 = btrfs_wait_extents(fs_info, dirty_pages);
if (ret)
return ret;
@@ -1066,11 +1088,11 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
}
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
int ret;
- ret = btrfs_write_and_wait_marked_extents(root,
+ ret = btrfs_write_and_wait_marked_extents(fs_info,
&trans->transaction->dirty_pages,
EXTENT_DIRTY);
clear_btree_io_tree(&trans->transaction->dirty_pages);
@@ -1094,7 +1116,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
int ret;
u64 old_root_bytenr;
u64 old_root_used;
- struct btrfs_root *tree_root = root->fs_info->tree_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *tree_root = fs_info->tree_root;
old_root_used = btrfs_root_used(&root->root_item);
@@ -1125,9 +1148,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
* to clean up the delayed refs.
*/
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
struct list_head *io_bgs = &trans->transaction->io_bgs;
struct list_head *next;
@@ -1143,30 +1165,31 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
- ret = btrfs_run_dev_stats(trans, root->fs_info);
+ ret = btrfs_run_dev_stats(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_run_dev_replace(trans, root->fs_info);
+ ret = btrfs_run_dev_replace(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_run_qgroups(trans, root->fs_info);
+ ret = btrfs_run_qgroups(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_setup_space_cache(trans, root);
+ ret = btrfs_setup_space_cache(trans, fs_info);
if (ret)
return ret;
/* run_qgroups might have added some more refs */
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
again:
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
+ struct btrfs_root *root;
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
root = list_entry(next, struct btrfs_root, dirty_list);
@@ -1178,16 +1201,16 @@ again:
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
}
while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
- ret = btrfs_write_dirty_block_groups(trans, root);
+ ret = btrfs_write_dirty_block_groups(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
}
@@ -1209,20 +1232,21 @@ again:
*/
void btrfs_add_dead_root(struct btrfs_root *root)
{
- spin_lock(&root->fs_info->trans_lock);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ spin_lock(&fs_info->trans_lock);
if (list_empty(&root->root_list))
- list_add_tail(&root->root_list, &root->fs_info->dead_roots);
- spin_unlock(&root->fs_info->trans_lock);
+ list_add_tail(&root->root_list, &fs_info->dead_roots);
+ spin_unlock(&fs_info->trans_lock);
}
/*
* update all the dirty fs tree roots on disk
*/
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_root *gang[8];
- struct btrfs_fs_info *fs_info = root->fs_info;
int i;
int ret;
int err = 0;
@@ -1236,7 +1260,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
if (ret == 0)
break;
for (i = 0; i < ret; i++) {
- root = gang[i];
+ struct btrfs_root *root = gang[i];
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
@@ -1292,8 +1316,8 @@ int btrfs_defrag_root(struct btrfs_root *root)
ret = btrfs_defrag_leaves(trans, root);
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(info->tree_root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(info);
cond_resched();
if (btrfs_fs_closing(info) || ret != -EAGAIN)
@@ -1343,7 +1367,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
*/
mutex_lock(&fs_info->tree_log_mutex);
- ret = commit_fs_roots(trans, src);
+ ret = commit_fs_roots(trans, fs_info);
if (ret)
goto out;
ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
@@ -1372,11 +1396,11 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* like chunk and root tree, as they won't affect qgroup.
* And we don't write super to avoid half committed status.
*/
- ret = commit_cowonly_roots(trans, src);
+ ret = commit_cowonly_roots(trans, fs_info);
if (ret)
goto out;
switch_commit_roots(trans->transaction, fs_info);
- ret = btrfs_write_and_wait_transaction(trans, src);
+ ret = btrfs_write_and_wait_transaction(trans, fs_info);
if (ret)
btrfs_handle_fs_error(fs_info, ret,
"Error while writing out transaction for qgroup");
@@ -1462,7 +1486,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
trans->bytes_reserved = trans->block_rsv->reserved;
- trace_btrfs_space_reservation(root->fs_info, "transaction",
+ trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid,
trans->bytes_reserved, 1);
dentry = pending->dentry;
@@ -1499,7 +1523,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* otherwise we corrupt the FS during
* snapshot
*/
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret) { /* Transaction aborted */
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -1572,7 +1596,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/*
* insert root back/forward references
*/
- ret = btrfs_add_root_ref(trans, tree_root, objectid,
+ ret = btrfs_add_root_ref(trans, fs_info, objectid,
parent_root->root_key.objectid,
btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
@@ -1582,7 +1606,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
key.offset = (u64)-1;
- pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
+ pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
btrfs_abort_transaction(trans, ret);
@@ -1595,7 +1619,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
}
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -1632,14 +1656,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, ret);
goto fail;
}
- ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
+ ret = btrfs_uuid_tree_add(trans, fs_info, new_uuid.b,
BTRFS_UUID_KEY_SUBVOL, objectid);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
- ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
+ ret = btrfs_uuid_tree_add(trans, fs_info,
new_root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
@@ -1649,7 +1673,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
}
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -1690,25 +1714,25 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
return ret;
}
-static void update_super_roots(struct btrfs_root *root)
+static void update_super_roots(struct btrfs_fs_info *fs_info)
{
struct btrfs_root_item *root_item;
struct btrfs_super_block *super;
- super = root->fs_info->super_copy;
+ super = fs_info->super_copy;
- root_item = &root->fs_info->chunk_root->root_item;
+ root_item = &fs_info->chunk_root->root_item;
super->chunk_root = root_item->bytenr;
super->chunk_root_generation = root_item->generation;
super->chunk_root_level = root_item->level;
- root_item = &root->fs_info->tree_root->root_item;
+ root_item = &fs_info->tree_root->root_item;
super->root = root_item->bytenr;
super->generation = root_item->generation;
super->root_level = root_item->level;
- if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
+ if (btrfs_test_opt(fs_info, SPACE_CACHE))
super->cache_generation = root_item->generation;
- if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags))
+ if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
super->uuid_tree_generation = root_item->generation;
}
@@ -1742,24 +1766,23 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
* wait for the current transaction commit to start and block subsequent
* transaction joins
*/
-static void wait_current_trans_commit_start(struct btrfs_root *root,
+static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *trans)
{
- wait_event(root->fs_info->transaction_blocked_wait,
- trans->state >= TRANS_STATE_COMMIT_START ||
- trans->aborted);
+ wait_event(fs_info->transaction_blocked_wait,
+ trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
}
/*
* wait for the current transaction to start and then become unblocked.
* caller holds ref.
*/
-static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
- struct btrfs_transaction *trans)
+static void wait_current_trans_commit_start_and_unblock(
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_transaction *trans)
{
- wait_event(root->fs_info->transaction_wait,
- trans->state >= TRANS_STATE_UNBLOCKED ||
- trans->aborted);
+ wait_event(fs_info->transaction_wait,
+ trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
}
/*
@@ -1768,7 +1791,6 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
*/
struct btrfs_async_commit {
struct btrfs_trans_handle *newtrans;
- struct btrfs_root *root;
struct work_struct work;
};
@@ -1782,18 +1804,18 @@ static void do_async_commit(struct work_struct *work)
* Tell lockdep about it.
*/
if (ac->newtrans->type & __TRANS_FREEZABLE)
- __sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);
+ __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
current->journal_info = ac->newtrans;
- btrfs_commit_transaction(ac->newtrans, ac->root);
+ btrfs_commit_transaction(ac->newtrans);
kfree(ac);
}
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
int wait_for_unblock)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_async_commit *ac;
struct btrfs_transaction *cur_trans;
@@ -1802,8 +1824,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
return -ENOMEM;
INIT_WORK(&ac->work, do_async_commit);
- ac->root = root;
- ac->newtrans = btrfs_join_transaction(root);
+ ac->newtrans = btrfs_join_transaction(trans->root);
if (IS_ERR(ac->newtrans)) {
int err = PTR_ERR(ac->newtrans);
kfree(ac);
@@ -1814,22 +1835,22 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
cur_trans = trans->transaction;
atomic_inc(&cur_trans->use_count);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
/*
* Tell lockdep we've released the freeze rwsem, since the
* async commit thread will be the one to unlock it.
*/
if (ac->newtrans->type & __TRANS_FREEZABLE)
- __sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);
+ __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
schedule_work(&ac->work);
/* wait for transaction to start and unblock */
if (wait_for_unblock)
- wait_current_trans_commit_start_and_unblock(root, cur_trans);
+ wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
else
- wait_current_trans_commit_start(root, cur_trans);
+ wait_current_trans_commit_start(fs_info, cur_trans);
if (current->journal_info == trans)
current->journal_info = NULL;
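
struct btrfs_async_commit no longer carries a root; the work item recovers everything from the handle itself (ac->newtrans->fs_info) for the lockdep hand-off above. A hypothetical caller of the trimmed signature:

static int demo_commit_async(struct btrfs_trans_handle *trans)
{
	/* second argument: nonzero also waits until the commit unblocks */
	return btrfs_commit_transaction_async(trans, 0);
}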
@@ -1842,6 +1863,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int err)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
DEFINE_WAIT(wait);
@@ -1849,7 +1871,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, err);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
/*
* If the transaction is removed from the list, it means this
@@ -1859,25 +1881,25 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
BUG_ON(list_empty(&cur_trans->list));
list_del_init(&cur_trans->list);
- if (cur_trans == root->fs_info->running_transaction) {
+ if (cur_trans == fs_info->running_transaction) {
cur_trans->state = TRANS_STATE_COMMIT_DOING;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
}
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
- btrfs_cleanup_one_transaction(trans->transaction, root);
+ btrfs_cleanup_one_transaction(trans->transaction, fs_info);
- spin_lock(&root->fs_info->trans_lock);
- if (cur_trans == root->fs_info->running_transaction)
- root->fs_info->running_transaction = NULL;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
+ if (cur_trans == fs_info->running_transaction)
+ fs_info->running_transaction = NULL;
+ spin_unlock(&fs_info->trans_lock);
if (trans->type & __TRANS_FREEZABLE)
- sb_end_intwrite(root->fs_info->sb);
+ sb_end_intwrite(fs_info->sb);
btrfs_put_transaction(cur_trans);
btrfs_put_transaction(cur_trans);
@@ -1885,7 +1907,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
if (current->journal_info == trans)
current->journal_info = NULL;
- btrfs_scrub_cancel(root->fs_info);
+ btrfs_scrub_cancel(fs_info);
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
@@ -1910,9 +1932,9 @@ btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
atomic_read(&cur_trans->pending_ordered) == 0);
}
-int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
int ret;
@@ -1920,20 +1942,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
/* Stop the commit early if ->aborted is set */
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
/* make a pass through all the delayed refs we have so far
* any running procs may add more while we are here
*/
- ret = btrfs_run_delayed_refs(trans, root, 0);
+ ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, fs_info);
trans->block_rsv = NULL;
cur_trans = trans->transaction;
@@ -1946,11 +1968,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
smp_wmb();
if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans, root);
+ btrfs_create_pending_block_groups(trans, fs_info);
- ret = btrfs_run_delayed_refs(trans, root, 0);
+ ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -1970,27 +1992,27 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* hurt to have more than one go through, but there's no
* real advantage to it either.
*/
- mutex_lock(&root->fs_info->ro_block_group_mutex);
+ mutex_lock(&fs_info->ro_block_group_mutex);
if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
&cur_trans->flags))
run_it = 1;
- mutex_unlock(&root->fs_info->ro_block_group_mutex);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
if (run_it)
- ret = btrfs_start_dirty_block_groups(trans, root);
+ ret = btrfs_start_dirty_block_groups(trans, fs_info);
}
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
atomic_inc(&cur_trans->use_count);
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
- wait_for_commit(root, cur_trans);
+ wait_for_commit(cur_trans);
if (unlikely(cur_trans->aborted))
ret = cur_trans->aborted;
@@ -2001,35 +2023,35 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
}
cur_trans->state = TRANS_STATE_COMMIT_START;
- wake_up(&root->fs_info->transaction_blocked_wait);
+ wake_up(&fs_info->transaction_blocked_wait);
- if (cur_trans->list.prev != &root->fs_info->trans_list) {
+ if (cur_trans->list.prev != &fs_info->trans_list) {
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (prev_trans->state != TRANS_STATE_COMPLETED) {
atomic_inc(&prev_trans->use_count);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
- wait_for_commit(root, prev_trans);
+ wait_for_commit(prev_trans);
ret = prev_trans->aborted;
btrfs_put_transaction(prev_trans);
if (ret)
goto cleanup_transaction;
} else {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
}
} else {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
}
extwriter_counter_dec(cur_trans, trans->type);
- ret = btrfs_start_delalloc_flush(root->fs_info);
+ ret = btrfs_start_delalloc_flush(fs_info);
if (ret)
goto cleanup_transaction;
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
goto cleanup_transaction;
@@ -2037,23 +2059,23 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
extwriter_counter_read(cur_trans) == 0);
/* some pending stuff might be added after the previous flush. */
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
goto cleanup_transaction;
- btrfs_wait_delalloc_flush(root->fs_info);
+ btrfs_wait_delalloc_flush(fs_info);
btrfs_wait_pending_ordered(cur_trans);
- btrfs_scrub_pause(root);
+ btrfs_scrub_pause(fs_info);
/*
* Ok now we need to make sure to block out any other joins while we
* commit the transaction. We could have started a join before setting
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
cur_trans->state = TRANS_STATE_COMMIT_DOING;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
@@ -2067,16 +2089,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* the balancing code from coming in and moving
* extents around in the middle of the commit
*/
- mutex_lock(&root->fs_info->reloc_mutex);
+ mutex_lock(&fs_info->reloc_mutex);
/*
* We needn't worry about the delayed items because we will
* deal with them in create_pending_snapshot(), which is the
* core function of the snapshot creation.
*/
- ret = create_pending_snapshots(trans, root->fs_info);
+ ret = create_pending_snapshots(trans, fs_info);
if (ret) {
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
@@ -2090,22 +2112,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* because all the trees which are snapshotted will be forced to COW
* the nodes and leaves.
*/
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret) {
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
- ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
/* Record old roots for later qgroup accounting */
- ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
+ ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
if (ret) {
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
@@ -2113,7 +2135,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* make sure none of the code above managed to slip in a
* delayed item
*/
- btrfs_assert_delayed_root_empty(root);
+ btrfs_assert_delayed_root_empty(fs_info);
WARN_ON(cur_trans != trans->transaction);
@@ -2130,12 +2152,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* from now until after the super is written, we avoid races
* with the tree-log code.
*/
- mutex_lock(&root->fs_info->tree_log_mutex);
+ mutex_lock(&fs_info->tree_log_mutex);
- ret = commit_fs_roots(trans, root);
+ ret = commit_fs_roots(trans, fs_info);
if (ret) {
- mutex_unlock(&root->fs_info->tree_log_mutex);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
@@ -2143,28 +2165,28 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* Since the transaction is done, we can apply the pending changes
* before the next transaction.
*/
- btrfs_apply_pending_changes(root->fs_info);
+ btrfs_apply_pending_changes(fs_info);
/* commit_fs_roots gets rid of all the tree log roots; it is now
* safe to free the root of tree log roots
*/
- btrfs_free_log_root_tree(trans, root->fs_info);
+ btrfs_free_log_root_tree(trans, fs_info);
/*
* Since the fs roots are all committed, we can get quite accurate
* new_roots. So let's do quota accounting.
*/
- ret = btrfs_qgroup_account_extents(trans, root->fs_info);
+ ret = btrfs_qgroup_account_extents(trans, fs_info);
if (ret < 0) {
- mutex_unlock(&root->fs_info->tree_log_mutex);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
- ret = commit_cowonly_roots(trans, root);
+ ret = commit_cowonly_roots(trans, fs_info);
if (ret) {
- mutex_unlock(&root->fs_info->tree_log_mutex);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
@@ -2174,64 +2196,64 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
- mutex_unlock(&root->fs_info->tree_log_mutex);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
- btrfs_prepare_extent_commit(trans, root);
+ btrfs_prepare_extent_commit(trans, fs_info);
- cur_trans = root->fs_info->running_transaction;
+ cur_trans = fs_info->running_transaction;
- btrfs_set_root_node(&root->fs_info->tree_root->root_item,
- root->fs_info->tree_root->node);
- list_add_tail(&root->fs_info->tree_root->dirty_list,
+ btrfs_set_root_node(&fs_info->tree_root->root_item,
+ fs_info->tree_root->node);
+ list_add_tail(&fs_info->tree_root->dirty_list,
&cur_trans->switch_commits);
- btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
- root->fs_info->chunk_root->node);
- list_add_tail(&root->fs_info->chunk_root->dirty_list,
+ btrfs_set_root_node(&fs_info->chunk_root->root_item,
+ fs_info->chunk_root->node);
+ list_add_tail(&fs_info->chunk_root->dirty_list,
&cur_trans->switch_commits);
- switch_commit_roots(cur_trans, root->fs_info);
+ switch_commit_roots(cur_trans, fs_info);
assert_qgroups_uptodate(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
- update_super_roots(root);
+ update_super_roots(fs_info);
- btrfs_set_super_log_root(root->fs_info->super_copy, 0);
- btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
- memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
- sizeof(*root->fs_info->super_copy));
+ btrfs_set_super_log_root(fs_info->super_copy, 0);
+ btrfs_set_super_log_root_level(fs_info->super_copy, 0);
+ memcpy(fs_info->super_for_commit, fs_info->super_copy,
+ sizeof(*fs_info->super_copy));
- btrfs_update_commit_device_size(root->fs_info);
- btrfs_update_commit_device_bytes_used(root, cur_trans);
+ btrfs_update_commit_device_size(fs_info);
+ btrfs_update_commit_device_bytes_used(fs_info, cur_trans);
- clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags);
- clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags);
+ clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
+ clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
btrfs_trans_release_chunk_metadata(trans);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
cur_trans->state = TRANS_STATE_UNBLOCKED;
- root->fs_info->running_transaction = NULL;
- spin_unlock(&root->fs_info->trans_lock);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ fs_info->running_transaction = NULL;
+ spin_unlock(&fs_info->trans_lock);
+ mutex_unlock(&fs_info->reloc_mutex);
- wake_up(&root->fs_info->transaction_wait);
+ wake_up(&fs_info->transaction_wait);
- ret = btrfs_write_and_wait_transaction(trans, root);
+ ret = btrfs_write_and_wait_transaction(trans, fs_info);
if (ret) {
- btrfs_handle_fs_error(root->fs_info, ret,
- "Error while writing out transaction");
- mutex_unlock(&root->fs_info->tree_log_mutex);
+ btrfs_handle_fs_error(fs_info, ret,
+ "Error while writing out transaction");
+ mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue;
}
- ret = write_ctree_super(trans, root, 0);
+ ret = write_ctree_super(trans, fs_info, 0);
if (ret) {
- mutex_unlock(&root->fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue;
}
@@ -2239,14 +2261,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* the super is written, we can safely allow the tree-loggers
* to go about their business
*/
- mutex_unlock(&root->fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->tree_log_mutex);
- btrfs_finish_extent_commit(trans, root);
+ btrfs_finish_extent_commit(trans, fs_info);
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
- btrfs_clear_space_info_full(root->fs_info);
+ btrfs_clear_space_info_full(fs_info);
- root->fs_info->last_trans_committed = cur_trans->transid;
+ fs_info->last_trans_committed = cur_trans->transid;
/*
* We needn't acquire the lock here because there is no other task
* which can change it.
@@ -2254,19 +2276,19 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
list_del_init(&cur_trans->list);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(cur_trans);
btrfs_put_transaction(cur_trans);
if (trans->type & __TRANS_FREEZABLE)
- sb_end_intwrite(root->fs_info->sb);
+ sb_end_intwrite(fs_info->sb);
- trace_btrfs_transaction_commit(root);
+ trace_btrfs_transaction_commit(trans->root);
- btrfs_scrub_continue(root);
+ btrfs_scrub_continue(fs_info);
if (current->journal_info == trans)
current->journal_info = NULL;
@@ -2277,23 +2299,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* If the fs has been frozen, we cannot handle delayed iputs, otherwise
* it'll result in a deadlock on SB_FREEZE_FS.
*/
- if (current != root->fs_info->transaction_kthread &&
- current != root->fs_info->cleaner_kthread &&
- !root->fs_info->fs_frozen)
- btrfs_run_delayed_iputs(root);
+ if (current != fs_info->transaction_kthread &&
+ current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
+ btrfs_run_delayed_iputs(fs_info);
return ret;
scrub_continue:
- btrfs_scrub_continue(root);
+ btrfs_scrub_continue(fs_info);
cleanup_transaction:
- btrfs_trans_release_metadata(trans, root);
+ btrfs_trans_release_metadata(trans, fs_info);
btrfs_trans_release_chunk_metadata(trans);
trans->block_rsv = NULL;
- btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
+ btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
if (current->journal_info == trans)
current->journal_info = NULL;
- cleanup_transaction(trans, root, ret);
+ cleanup_transaction(trans, trans->root, ret);
return ret;
}
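
The transaction.c hunks above are all instances of the series' central cleanup: hoist the repeated root->fs_info dereference into a local fs_info once per function, then use it everywhere. Below is a minimal userspace sketch of the pattern; the struct names mirror the kernel's but everything here is a stand-in, not kernel code.

/*
 * Userspace sketch of the cleanup pattern applied throughout this
 * series: cache root->fs_info once instead of dereferencing it at
 * every use. Stand-in types, not the kernel's.
 */
#include <stdio.h>

struct fs_info { int frozen; };
struct root { struct fs_info *fs_info; };

/* Before: every access repeats the root->fs_info hop. */
static void commit_old(struct root *root)
{
	if (!root->fs_info->frozen)
		printf("committing, frozen=%d\n", root->fs_info->frozen);
}

/* After: one dereference up front, shorter lines below. */
static void commit_new(struct root *root)
{
	struct fs_info *fs_info = root->fs_info;

	if (!fs_info->frozen)
		printf("committing, frozen=%d\n", fs_info->frozen);
}

int main(void)
{
	struct fs_info fi = { .frozen = 0 };
	struct root r = { .fs_info = &fi };

	commit_old(&r);
	commit_new(&r);
	return 0;
}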
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 6cf0d37d4f76..5dfb5590fff6 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -123,11 +123,6 @@ struct btrfs_trans_handle {
bool sync;
bool dirty;
unsigned int type;
- /*
- * this root is only needed to validate that the root passed to
- * start_transaction is the same as the one passed to end_transaction.
- * Subvolume quota depends on this
- */
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct seq_list delayed_ref_elem;
@@ -185,8 +180,7 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
-int btrfs_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
@@ -202,27 +196,24 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
-int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
+int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
-int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
+int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
int wait_for_unblock);
-int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
-void btrfs_throttle(struct btrfs_root *root);
+int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
+int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
+void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_write_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages, int mark);
-int btrfs_wait_marked_extents(struct btrfs_root *root,
+int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
+int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *dirty_pages);
+int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
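
The header hunk above shows the other half of the cleanup: transaction entry points that used to take a (trans, root) pair now take only the handle, since trans->fs_info already identifies the filesystem. A hedged sketch of what call sites look like after the change, with stand-in types and bodies:

#include <stdio.h>

struct btrfs_fs_info { const char *label; };
struct btrfs_trans_handle { struct btrfs_fs_info *fs_info; };

/* New-style API: the handle is the only parameter needed. */
static int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	printf("ending transaction on %s\n", trans->fs_info->label);
	return 0;
}

int main(void)
{
	struct btrfs_fs_info fi = { .label = "fs" };
	struct btrfs_trans_handle trans = { .fs_info = &fi };

	/*
	 * Old call sites read btrfs_end_transaction(trans, root); the
	 * redundant root goes away, and passing a root that doesn't
	 * match the handle becomes impossible by construction.
	 */
	return btrfs_end_transaction(&trans);
}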
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 3d33c4e41e5f..f10bf5213ed8 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -142,12 +142,13 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
mutex_lock(&root->log_mutex);
if (root->log_root) {
- if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+ if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
goto out;
}
@@ -159,10 +160,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
}
} else {
- mutex_lock(&root->fs_info->tree_log_mutex);
- if (!root->fs_info->log_root_tree)
- ret = btrfs_init_log_root_tree(trans, root->fs_info);
- mutex_unlock(&root->fs_info->tree_log_mutex);
+ mutex_lock(&fs_info->tree_log_mutex);
+ if (!fs_info->log_root_tree)
+ ret = btrfs_init_log_root_tree(trans, fs_info);
+ mutex_unlock(&fs_info->tree_log_mutex);
if (ret)
goto out;
@@ -292,25 +293,26 @@ static int process_one_buffer(struct btrfs_root *log,
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
/*
* If this fs is mixed then we need to be able to process the leaves to
* pin down any logged extents, so we have to read the block.
*/
- if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
+ if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
ret = btrfs_read_buffer(eb, gen);
if (ret)
return ret;
}
if (wc->pin)
- ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
- eb->start, eb->len);
+ ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
+ eb->len);
if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->pin && btrfs_header_level(eb) == 0)
- ret = btrfs_exclude_logged_extents(log, eb);
+ ret = btrfs_exclude_logged_extents(fs_info, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
@@ -339,6 +341,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
u32 item_size;
u64 saved_i_size = 0;
@@ -459,9 +462,9 @@ insert:
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
- btrfs_truncate_item(root, path, item_size, 1);
+ btrfs_truncate_item(fs_info, path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(root, path,
+ btrfs_extend_item(fs_info, path,
item_size - found_size);
} else if (ret) {
return ret;
@@ -582,6 +585,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int found_type;
u64 extent_end;
u64 start = key->offset;
@@ -608,7 +612,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, slot, item);
nbytes = btrfs_file_extent_ram_bytes(eb, item);
- extent_end = ALIGN(start + size, root->sectorsize);
+ extent_end = ALIGN(start + size,
+ fs_info->sectorsize);
} else {
ret = 0;
goto out;
@@ -689,7 +694,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* as the owner of the file extent changed from log tree
* (doesn't affect qgroup) to fs/file tree(affects qgroup)
*/
- ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+ ret = btrfs_qgroup_trace_extent(trans, fs_info,
btrfs_file_extent_disk_bytenr(eb, item),
btrfs_file_extent_disk_num_bytes(eb, item),
GFP_NOFS);
@@ -704,10 +709,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* is this extent already allocated in the extent
* allocation tree? If so, just add a reference
*/
- ret = btrfs_lookup_data_extent(root, ins.objectid,
+ ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
ins.offset);
if (ret == 0) {
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, fs_info,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
@@ -719,7 +724,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
- root, root->root_key.objectid,
+ fs_info,
+ root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
goto out;
@@ -796,14 +802,12 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum,
list);
if (!ret)
- ret = btrfs_del_csums(trans,
- root->fs_info->csum_root,
- sums->bytenr,
- sums->len);
+ ret = btrfs_del_csums(trans, fs_info,
+ sums->bytenr,
+ sums->len);
if (!ret)
ret = btrfs_csum_file_blocks(trans,
- root->fs_info->csum_root,
- sums);
+ fs_info->csum_root, sums);
list_del(&sums->list);
kfree(sums);
}
@@ -841,6 +845,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
struct inode *dir,
struct btrfs_dir_item *di)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
char *name;
int name_len;
@@ -873,7 +878,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
if (ret)
goto out;
else
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
out:
kfree(name);
iput(inode);
@@ -991,6 +996,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
u64 ref_index, char *name, int namelen,
int *search_done)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *victim_name;
int victim_name_len;
@@ -1049,7 +1055,7 @@ again:
kfree(victim_name);
if (ret)
return ret;
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
return ret;
*search_done = 1;
@@ -1120,7 +1126,8 @@ again:
victim_name_len);
if (!ret)
ret = btrfs_run_delayed_items(
- trans, root);
+ trans,
+ fs_info);
}
iput(victim_parent);
kfree(victim_name);
@@ -1811,6 +1818,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
@@ -1823,7 +1831,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(root, eb, di))
+ if (verify_dir_item(fs_info, eb, di))
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
@@ -1940,12 +1948,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
next:
/* check the next slot in the tree to see if it is a valid item */
nritems = btrfs_header_nritems(path->nodes[0]);
+ path->slots[0]++;
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret)
goto out;
- } else {
- path->slots[0]++;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
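
Beyond the fs_info churn, the find_dir_range hunk above reorders the slot advance: the cursor is now incremented before the bounds check, so finishing the last valid slot moves to the next leaf instead of reading the key one slot past the end. A minimal sketch of the corrected advance-then-check idiom, with a plain array standing in for a leaf and break standing in for btrfs_next_leaf():

#include <stdio.h>

int main(void)
{
	int items[3] = { 10, 20, 30 };
	int nritems = 3;
	int slot = 0;

	while (1) {
		printf("item %d\n", items[slot]);
		slot++;			/* advance first ...              */
		if (slot >= nritems)	/* ... then check, so we never    */
			break;		/* read past the last item (the   */
	}				/* "go to next leaf" case)        */
	return 0;
}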
@@ -1978,6 +1985,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
struct inode *dir,
struct btrfs_key *dir_key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
int slot;
@@ -1999,7 +2007,7 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(root, eb, di)) {
+ if (verify_dir_item(fs_info, eb, di)) {
ret = -EIO;
goto out;
}
@@ -2046,7 +2054,7 @@ again:
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
if (!ret)
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
kfree(name);
iput(inode);
if (ret)
@@ -2407,6 +2415,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int *level,
struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner;
u64 bytenr;
u64 ptr_gen;
@@ -2432,12 +2441,12 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
- next = btrfs_find_create_tree_block(root, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
@@ -2459,16 +2468,16 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, root->fs_info,
- next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(root_owner !=
BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(root,
- bytenr, blocksize);
+ ret = btrfs_free_and_pin_reserved_extent(
+ fs_info, bytenr,
+ blocksize);
if (ret) {
free_extent_buffer(next);
return ret;
@@ -2505,6 +2514,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int *level,
struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner;
int i;
int slot;
@@ -2538,14 +2548,14 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, root->fs_info,
- next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(root,
+ ret = btrfs_free_and_pin_reserved_extent(
+ fs_info,
path->nodes[*level]->start,
path->nodes[*level]->len);
if (ret)
@@ -2567,6 +2577,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
static int walk_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *log, struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
int wret;
int level;
@@ -2615,15 +2626,15 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, log->fs_info, next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(log->root_key.objectid !=
BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(log, next->start,
- next->len);
+ ret = btrfs_free_and_pin_reserved_extent(fs_info,
+ next->start, next->len);
if (ret)
goto out;
}
@@ -2641,14 +2652,15 @@ out:
static int update_log_root(struct btrfs_trans_handle *trans,
struct btrfs_root *log)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret;
if (log->log_transid == 1) {
/* insert root item on the first sync */
- ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
+ ret = btrfs_insert_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item);
} else {
- ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
+ ret = btrfs_update_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item);
}
return ret;
@@ -2742,8 +2754,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
int index2;
int mark;
int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
- struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
+ struct btrfs_root *log_root_tree = fs_info->log_root_tree;
int log_transid = 0;
struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug;
@@ -2771,7 +2784,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
while (1) {
int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
- if (!btrfs_test_opt(root->fs_info, SSD) &&
+ if (!btrfs_test_opt(fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1);
@@ -2783,7 +2796,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
/* bail out if we need to do a full commit */
- if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+ if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
@@ -2799,12 +2812,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* wait for them until later.
*/
blk_start_plug(&plug);
- ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
+ ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
mutex_unlock(&root->log_mutex);
goto out;
}
@@ -2849,14 +2862,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug);
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
if (ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
goto out;
}
- btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_tree_log_extents(log, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
@@ -2874,8 +2887,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
- ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
- mark);
+ ret = btrfs_wait_tree_log_extents(log, mark);
btrfs_wait_logged_extents(trans, log, log_transid);
wait_log_commit(log_root_tree,
root_log_ctx.log_transid);
@@ -2898,43 +2910,42 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* now that we've moved on to the tree of log tree roots,
* check the full commit flag again
*/
- if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+ if (btrfs_need_log_full_commit(fs_info, trans)) {
blk_finish_plug(&plug);
- btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_tree_log_extents(log, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
}
- ret = btrfs_write_marked_extents(log_root_tree,
+ ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
if (ret) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
- ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ ret = btrfs_wait_tree_log_extents(log, mark);
if (!ret)
- ret = btrfs_wait_marked_extents(log_root_tree,
- &log_root_tree->dirty_log_pages,
- EXTENT_NEW | EXTENT_DIRTY);
+ ret = btrfs_wait_tree_log_extents(log_root_tree,
+ EXTENT_NEW | EXTENT_DIRTY);
if (ret) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
btrfs_wait_logged_extents(trans, log, log_transid);
- btrfs_set_super_log_root(root->fs_info->super_for_commit,
- log_root_tree->node->start);
- btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
- btrfs_header_level(log_root_tree->node));
+ btrfs_set_super_log_root(fs_info->super_for_commit,
+ log_root_tree->node->start);
+ btrfs_set_super_log_root_level(fs_info->super_for_commit,
+ btrfs_header_level(log_root_tree->node));
log_root_tree->log_transid++;
mutex_unlock(&log_root_tree->log_mutex);
@@ -2946,9 +2957,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* the running transaction open, so a full commit can't hop
* in and cause problems either.
*/
- ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
+ ret = write_ctree_super(trans, fs_info, 1);
if (ret) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
}
@@ -3182,6 +3193,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct inode *inode, u64 dirid)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log;
u64 index;
int ret;
@@ -3199,7 +3211,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
dirid, &index);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
if (ret == -ENOSPC) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
ret = 0;
} else if (ret < 0 && ret != -ENOENT)
btrfs_abort_transaction(trans, ret);
@@ -3606,6 +3618,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
int start_slot, int nr, int inode_only,
u64 logged_isize)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
unsigned long src_offset;
unsigned long dst_offset;
struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
@@ -3716,7 +3729,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
}
ret = btrfs_lookup_csums_range(
- log->fs_info->csum_root,
+ fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
&ordered_sums, 0);
if (ret) {
@@ -3789,7 +3802,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src_path->slots[0],
extent);
*last_extent = ALIGN(key.offset + len,
- log->sectorsize);
+ fs_info->sectorsize);
} else {
len = btrfs_file_extent_num_bytes(src, extent);
*last_extent = key.offset + len;
@@ -3852,7 +3865,8 @@ fill_holes:
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
len = btrfs_file_extent_inline_len(src, i, extent);
- extent_end = ALIGN(key.offset + len, log->sectorsize);
+ extent_end = ALIGN(key.offset + len,
+ fs_info->sectorsize);
} else {
len = btrfs_file_extent_num_bytes(src, extent);
extent_end = key.offset + len;
@@ -3902,6 +3916,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
const struct list_head *logged_list,
bool *ordered_io_error)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
struct btrfs_root *log = root->log_root;
u64 mod_start = em->mod_start;
@@ -4018,7 +4033,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
}
/* block start is already adjusted for the file extent offset. */
- ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
+ ret = btrfs_lookup_csums_range(fs_info->csum_root,
em->block_start + csum_offset,
em->block_start + csum_offset +
csum_len - 1, &ordered_sums, 0);
@@ -4361,6 +4376,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_path *path)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_key key;
u64 hole_start;
@@ -4370,7 +4386,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
const u64 ino = btrfs_ino(inode);
const u64 i_size = i_size_read(inode);
- if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ if (!btrfs_fs_incompat(fs_info, NO_HOLES))
return 0;
key.objectid = ino;
@@ -4427,7 +4443,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
if (hole_size == 0)
return 0;
- hole_size = ALIGN(hole_size, root->sectorsize);
+ hole_size = ALIGN(hole_size, fs_info->sectorsize);
ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
hole_size, 0, hole_size, 0, 0, 0);
return ret;
@@ -4585,6 +4601,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
const loff_t end,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_path *dst_path;
struct btrfs_key min_key;
@@ -4637,7 +4654,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
*/
if (S_ISDIR(inode->i_mode) ||
- BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
+ BTRFS_I(inode)->generation > fs_info->last_trans_committed)
ret = btrfs_commit_inode_delayed_items(trans, inode);
else
ret = btrfs_commit_inode_delayed_inode(inode);
@@ -4774,7 +4791,7 @@ again:
inode_key.objectid = other_ino;
inode_key.type = BTRFS_INODE_ITEM_KEY;
inode_key.offset = 0;
- other_inode = btrfs_iget(root->fs_info->sb,
+ other_inode = btrfs_iget(fs_info->sb,
&inode_key, root,
NULL);
/*
@@ -5138,6 +5155,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
struct inode *start_inode,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
struct btrfs_path *path;
LIST_HEAD(dir_list);
@@ -5205,8 +5223,8 @@ process_leaf:
if (di_key.type == BTRFS_ROOT_ITEM_KEY)
continue;
- di_inode = btrfs_iget(root->fs_info->sb, &di_key,
- root, NULL);
+ btrfs_release_path(path);
+ di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto next_dir_inode;
@@ -5214,13 +5232,12 @@ process_leaf:
if (btrfs_inode_in_log(di_inode, trans->transid)) {
iput(di_inode);
- continue;
+ break;
}
ctx->log_new_dentries = false;
if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
log_mode = LOG_INODE_ALL;
- btrfs_release_path(path);
ret = btrfs_log_inode(trans, root, di_inode,
log_mode, 0, LLONG_MAX, ctx);
if (!ret &&
@@ -5268,6 +5285,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
struct btrfs_path *path;
struct btrfs_key key;
@@ -5332,7 +5350,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
- dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
+ dir_inode = btrfs_iget(fs_info->sb, &inode_key,
root, NULL);
/* If parent inode was deleted, skip it. */
if (IS_ERR(dir_inode))
@@ -5374,17 +5392,18 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
int exists_only,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
struct dentry *old_parent = NULL;
int ret = 0;
- u64 last_committed = root->fs_info->last_trans_committed;
+ u64 last_committed = fs_info->last_trans_committed;
bool log_dentries = false;
struct inode *orig_inode = inode;
sb = inode->i_sb;
- if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
+ if (btrfs_test_opt(fs_info, NOTREELOG)) {
ret = 1;
goto end_no_trans;
}
@@ -5393,8 +5412,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
* The previous transaction commit didn't complete, so we need to do a
* full commit ourselves.
*/
- if (root->fs_info->last_trans_log_full_commit >
- root->fs_info->last_trans_committed) {
+ if (fs_info->last_trans_log_full_commit >
+ fs_info->last_trans_committed) {
ret = 1;
goto end_no_trans;
}
@@ -5515,7 +5534,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
dput(old_parent);
if (ret < 0) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
ret = 1;
}
@@ -5675,7 +5694,7 @@ again:
btrfs_free_path(path);
/* step 4: commit the transaction, which also unpins the blocks */
- ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@@ -5687,7 +5706,7 @@ again:
return 0;
error:
if (wc.trans)
- btrfs_end_transaction(wc.trans, fs_info->tree_root);
+ btrfs_end_transaction(wc.trans);
btrfs_free_path(path);
return ret;
}
@@ -5786,6 +5805,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *old_dir,
struct dentry *parent)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root * root = BTRFS_I(inode)->root;
/*
@@ -5800,9 +5820,9 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
* from hasn't been logged, we don't need to log it
*/
if (BTRFS_I(inode)->logged_trans <=
- root->fs_info->last_trans_committed &&
+ fs_info->last_trans_committed &&
(!old_dir || BTRFS_I(old_dir)->logged_trans <=
- root->fs_info->last_trans_committed))
+ fs_info->last_trans_committed))
return 0;
return btrfs_log_inode_parent(trans, root, inode, parent, 0,
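
The btrfs_log_new_name hunk above keeps the existing fast path: a rename needs logging only if the inode or its old parent directory was logged in a transaction newer than the last committed one. A sketch of that check with plain integers standing in for the transid counters (the NULL old_dir case is elided):

#include <stdbool.h>
#include <stdio.h>

static bool needs_log(unsigned long long inode_logged_trans,
		      unsigned long long old_dir_logged_trans,
		      unsigned long long last_trans_committed)
{
	/* Everything already committed: the log has nothing newer to say. */
	if (inode_logged_trans <= last_trans_committed &&
	    old_dir_logged_trans <= last_trans_committed)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", needs_log(5, 4, 5)); /* 0: both already committed   */
	printf("%d\n", needs_log(6, 4, 5)); /* 1: inode logged after commit */
	return 0;
}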
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 7fc89e4adb41..161342b73ce5 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -92,9 +92,10 @@ out:
}
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
- struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+ struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid_cpu)
{
+ struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
struct btrfs_path *path = NULL;
struct btrfs_key key;
@@ -132,13 +133,13 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
- btrfs_extend_item(uuid_root, path, sizeof(subid_le));
+ btrfs_extend_item(fs_info, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
} else if (ret < 0) {
- btrfs_warn(uuid_root->fs_info,
+ btrfs_warn(fs_info,
"insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
ret, (unsigned long long)key.objectid,
(unsigned long long)key.offset, type);
@@ -156,9 +157,10 @@ out:
}
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
- struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+ struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid)
{
+ struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
struct btrfs_path *path = NULL;
struct btrfs_key key;
@@ -185,8 +187,8 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
if (ret < 0) {
- btrfs_warn(uuid_root->fs_info,
- "error %d while searching for uuid item!", ret);
+ btrfs_warn(fs_info, "error %d while searching for uuid item!",
+ ret);
goto out;
}
if (ret > 0) {
@@ -199,8 +201,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
offset = btrfs_item_ptr_offset(eb, slot);
item_size = btrfs_item_size_nr(eb, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) {
- btrfs_warn(uuid_root->fs_info,
- "uuid item with illegal size %lu!",
+ btrfs_warn(fs_info, "uuid item with illegal size %lu!",
(unsigned long)item_size);
ret = -ENOENT;
goto out;
@@ -230,7 +231,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
- btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1);
+ btrfs_truncate_item(fs_info, path, item_size - sizeof(subid), 1);
out:
btrfs_free_path(path);
@@ -250,8 +251,8 @@ static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
goto out;
}
- ret = btrfs_uuid_tree_rem(trans, uuid_root, uuid, type, subid);
- btrfs_end_transaction(trans, uuid_root);
+ ret = btrfs_uuid_tree_rem(trans, uuid_root->fs_info, uuid, type, subid);
+ btrfs_end_transaction(trans);
out:
return ret;
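
For context on the uuid-tree hunks: a uuid item's payload is a packed array of little-endian u64 subids, so removing one memmoves the tail down over the victim and then truncates the item by one u64. A userspace sketch of that removal, with a plain buffer standing in for the extent buffer and the size bookkeeping standing in for btrfs_truncate_item():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t subids[4] = { 100, 200, 300, 400 };
	size_t item_size = sizeof(subids);
	size_t victim = 1;			/* remove subid 200 */

	size_t move_dst = victim * sizeof(uint64_t);
	size_t move_src = move_dst + sizeof(uint64_t);
	size_t move_len = item_size - move_src;

	/* Shift the tail down over the victim, then shrink the item. */
	memmove((char *)subids + move_dst, (char *)subids + move_src,
		move_len);
	item_size -= sizeof(uint64_t);

	for (size_t i = 0; i < item_size / sizeof(uint64_t); i++)
		printf("%llu\n", (unsigned long long)subids[i]);
	return 0;				/* prints 100 300 400 */
}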
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0d7d635d8bfb..3c3c69c0eee4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -134,9 +134,9 @@ const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_device *device);
-static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
@@ -343,9 +343,9 @@ static void requeue_list(struct btrfs_pending_bios *pending_bios,
*/
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
struct bio *pending;
struct backing_dev_info *bdi;
- struct btrfs_fs_info *fs_info;
struct btrfs_pending_bios *pending_bios;
struct bio *tail;
struct bio *cur;
@@ -367,7 +367,6 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
blk_start_plug(&plug);
bdi = blk_get_backing_dev_info(device->bdev);
- fs_info = device->dev_root->fs_info;
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
@@ -1179,7 +1178,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length)
{
struct btrfs_key key;
- struct btrfs_root *root = device->dev_root;
+ struct btrfs_root *root = device->fs_info->dev_root;
struct btrfs_dev_extent *dev_extent;
struct btrfs_path *path;
u64 extent_end;
@@ -1262,7 +1261,7 @@ static int contains_pending_extent(struct btrfs_transaction *transaction,
struct btrfs_device *device,
u64 *start, u64 len)
{
- struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = device->fs_info;
struct extent_map *em;
struct list_head *search_list = &fs_info->pinned_chunks;
int ret = 0;
@@ -1338,8 +1337,9 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
struct btrfs_device *device, u64 num_bytes,
u64 search_start, u64 *start, u64 *len)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
+ struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
- struct btrfs_root *root = device->dev_root;
struct btrfs_dev_extent *dev_extent;
struct btrfs_path *path;
u64 hole_size;
@@ -1357,7 +1357,7 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
* used by the boot loader (grub for example), so we make sure to start
* at an offset of at least 1MB.
*/
- min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+ min_search_start = max(fs_info->alloc_start, 1024ull * 1024);
search_start = max(search_start, min_search_start);
path = btrfs_alloc_path();
@@ -1508,9 +1508,10 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start, u64 *dev_extent_len)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
+ struct btrfs_root *root = fs_info->dev_root;
int ret;
struct btrfs_path *path;
- struct btrfs_root *root = device->dev_root;
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf = NULL;
@@ -1544,7 +1545,7 @@ again:
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
} else {
- btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
+ btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
goto out;
}
@@ -1552,8 +1553,8 @@ again:
ret = btrfs_del_item(trans, root, path);
if (ret) {
- btrfs_handle_fs_error(root->fs_info, ret,
- "Failed to remove dev extent item");
+ btrfs_handle_fs_error(fs_info, ret,
+ "Failed to remove dev extent item");
} else {
set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
}
@@ -1569,7 +1570,8 @@ static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
{
int ret;
struct btrfs_path *path;
- struct btrfs_root *root = device->dev_root;
+ struct btrfs_fs_info *fs_info = device->fs_info;
+ struct btrfs_root *root = fs_info->dev_root;
struct btrfs_dev_extent *extent;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -1595,8 +1597,7 @@ static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
- write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
- btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
+ write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
btrfs_mark_buffer_dirty(leaf);
@@ -1667,9 +1668,10 @@ error:
* the btrfs_device struct should be fully filled in
*/
static int btrfs_add_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
struct btrfs_dev_item *dev_item;
@@ -1677,8 +1679,6 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
struct btrfs_key key;
unsigned long ptr;
- root = root->fs_info->chunk_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1713,7 +1713,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
ptr = btrfs_device_uuid(dev_item);
write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
ptr = btrfs_device_fsid(dev_item);
- write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
+ write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
@@ -1737,16 +1737,15 @@ static void update_dev_time(char *path_name)
filp_close(filp, NULL);
}
-static int btrfs_rm_dev_item(struct btrfs_root *root,
+static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_trans_handle *trans;
- root = root->fs_info->chunk_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1774,7 +1773,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
goto out;
out:
btrfs_free_path(path);
- btrfs_commit_transaction(trans, root);
+ btrfs_commit_transaction(trans);
return ret;
}
@@ -1853,7 +1852,7 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
fs_info->fs_devices->latest_bdev = next_device->bdev;
}
-int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
+int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
@@ -1863,20 +1862,20 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
mutex_lock(&uuid_mutex);
- num_devices = root->fs_info->fs_devices->num_devices;
- btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
- if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
+ num_devices = fs_info->fs_devices->num_devices;
+ btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
+ if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
WARN_ON(num_devices < 1);
num_devices--;
}
- btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);
+ btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
- ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
+ ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
if (ret)
goto out;
- ret = btrfs_find_device_by_devspec(root, devid, device_path,
- &device);
+ ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
+ &device);
if (ret)
goto out;
@@ -1885,16 +1884,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
goto out;
}
- if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
+ if (device->writeable && fs_info->fs_devices->rw_devices == 1) {
ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
goto out;
}
if (device->writeable) {
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
clear_super = true;
}
@@ -1909,12 +1908,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
* counter although write_all_supers() is not locked out. This
* could give a filesystem state which requires a degraded mount.
*/
- ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
+ ret = btrfs_rm_dev_item(fs_info, device);
if (ret)
goto error_undo;
device->in_fs_metadata = 0;
- btrfs_scrub_cancel_dev(root->fs_info, device);
+ btrfs_scrub_cancel_dev(fs_info, device);
/*
* the device list mutex makes sure that we don't change
@@ -1927,7 +1926,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
*/
cur_devices = device->fs_devices;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
list_del_rcu(&device->dev_list);
device->fs_devices->num_devices--;
@@ -1936,17 +1935,17 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
if (device->missing)
device->fs_devices->missing_devices--;
- btrfs_assign_next_active_device(root->fs_info, device, NULL);
+ btrfs_assign_next_active_device(fs_info, device, NULL);
if (device->bdev) {
device->fs_devices->open_devices--;
/* remove sysfs entry */
- btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
+ btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
}
- num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
- btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
+ btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
/*
* at this point, the device is zero sized and detached from
@@ -1961,7 +1960,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
if (cur_devices->open_devices == 0) {
struct btrfs_fs_devices *fs_devices;
- fs_devices = root->fs_info->fs_devices;
+ fs_devices = fs_info->fs_devices;
while (fs_devices) {
if (fs_devices->seed == cur_devices) {
fs_devices->seed = cur_devices->seed;
@@ -1974,8 +1973,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
free_fs_devices(cur_devices);
}
- root->fs_info->num_tolerated_disk_barrier_failures =
- btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
+ fs_info->num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
out:
mutex_unlock(&uuid_mutex);
@@ -1983,11 +1982,11 @@ out:
error_undo:
if (device->writeable) {
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
list_add(&device->dev_alloc_list,
- &root->fs_info->fs_devices->alloc_list);
+ &fs_info->fs_devices->alloc_list);
device->fs_devices->rw_devices++;
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
}
goto out;
}
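
The btrfs_rm_device hunks above also show the "opencode chunk locking" cleanup from the merge summary: the lock_chunks(root)/unlock_chunks(root) wrappers are replaced by direct mutex_lock/mutex_unlock on fs_info->chunk_mutex, which names the lock at the call site and drops the need for a root. A sketch with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

struct fs_info { pthread_mutex_t chunk_mutex; };

int main(void)
{
	struct fs_info fi = { .chunk_mutex = PTHREAD_MUTEX_INITIALIZER };

	/* Before: lock_chunks(root), which hid the actual mutex behind
	 * a helper and a root->fs_info hop. After: the call site names
	 * exactly which lock it takes. */
	pthread_mutex_lock(&fi.chunk_mutex);
	printf("chunk mutex held\n");
	pthread_mutex_unlock(&fi.chunk_mutex);
	return 0;
}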
@@ -2092,7 +2091,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
call_rcu(&tgtdev->rcu, free_device);
}
-static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
+ char *device_path,
struct btrfs_device **device)
{
int ret = 0;
@@ -2104,14 +2104,13 @@ static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
*device = NULL;
ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
- root->fs_info->bdev_holder, 0, &bdev, &bh);
+ fs_info->bdev_holder, 0, &bdev, &bh);
if (ret)
return ret;
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
- *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
- disk_super->fsid);
+ *device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
brelse(bh);
if (!*device)
ret = -ENOENT;
@@ -2119,7 +2118,7 @@ static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
return ret;
}
-int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device)
{
@@ -2128,7 +2127,7 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
struct list_head *devices;
struct btrfs_device *tmp;
- devices = &root->fs_info->fs_devices->devices;
+ devices = &fs_info->fs_devices->devices;
/*
* It is safe to read the devices since the volume_mutex
* is held by the caller.
@@ -2145,30 +2144,28 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
return 0;
} else {
- return btrfs_find_device_by_path(root, device_path, device);
+ return btrfs_find_device_by_path(fs_info, device_path, device);
}
}
/*
* Lookup a device given by device id, or the path if the id is 0.
*/
-int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
- char *devpath,
- struct btrfs_device **device)
+int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
+ char *devpath, struct btrfs_device **device)
{
int ret;
if (devid) {
ret = 0;
- *device = btrfs_find_device(root->fs_info, devid, NULL,
- NULL);
+ *device = btrfs_find_device(fs_info, devid, NULL, NULL);
if (!*device)
ret = -ENOENT;
} else {
if (!devpath || !devpath[0])
return -EINVAL;
- ret = btrfs_find_device_missing_or_by_path(root, devpath,
+ ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
device);
}
return ret;
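
The devspec lookup above resolves in a fixed order: a nonzero devid wins outright; otherwise a non-empty path is required, else -EINVAL. A tiny sketch of that precedence (stand-in logic, userspace errno):

#include <errno.h>
#include <stdio.h>

static int find_by_devspec(unsigned long long devid, const char *devpath)
{
	if (devid)
		return 0;		/* lookup by id takes precedence */
	if (!devpath || !devpath[0])
		return -EINVAL;		/* no usable spec at all */
	return 0;			/* fall back to path (or "missing") */
}

int main(void)
{
	printf("%d\n", find_by_devspec(1, NULL));	/* 0   */
	printf("%d\n", find_by_devspec(0, ""));		/* -22 */
	printf("%d\n", find_by_devspec(0, "/dev/sdb"));	/* 0   */
	return 0;
}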
@@ -2177,12 +2174,12 @@ int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
/*
* does all the dirty work required for changing the file system's UUID.
*/
-static int btrfs_prepare_sprout(struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
struct btrfs_fs_devices *seed_devices;
- struct btrfs_super_block *disk_super = root->fs_info->super_copy;
+ struct btrfs_super_block *disk_super = fs_info->super_copy;
struct btrfs_device *device;
u64 super_flags;
@@ -2208,15 +2205,15 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
INIT_LIST_HEAD(&seed_devices->alloc_list);
mutex_init(&seed_devices->device_list_mutex);
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
synchronize_rcu);
list_for_each_entry(device, &seed_devices->devices, dev_list)
device->fs_devices = seed_devices;
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
fs_devices->seeding = 0;
fs_devices->num_devices = 0;
@@ -2226,9 +2223,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
- memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+ memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
super_flags = btrfs_super_flags(disk_super) &
~BTRFS_SUPER_FLAG_SEEDING;
@@ -2241,8 +2238,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
* Store the expected generation for seed devices in device items.
*/
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *root = fs_info->chunk_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dev_item *dev_item;
@@ -2257,7 +2255,6 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- root = root->fs_info->chunk_root;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.offset = 0;
key.type = BTRFS_DEV_ITEM_KEY;
@@ -2293,8 +2290,7 @@ next_slot:
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
- device = btrfs_find_device(root->fs_info, devid, dev_uuid,
- fs_uuid);
+ device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
BUG_ON(!device); /* Logic error */
if (device->fs_devices->seeding) {
@@ -2312,28 +2308,29 @@ error:
return ret;
}
-int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
{
+ struct btrfs_root *root = fs_info->dev_root;
struct request_queue *q;
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct block_device *bdev;
struct list_head *devices;
- struct super_block *sb = root->fs_info->sb;
+ struct super_block *sb = fs_info->sb;
struct rcu_string *name;
u64 tmp;
int seeding_dev = 0;
int ret = 0;
- if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
+ if ((sb->s_flags & MS_RDONLY) && !fs_info->fs_devices->seeding)
return -EROFS;
bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
- root->fs_info->bdev_holder);
+ fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- if (root->fs_info->fs_devices->seeding) {
+ if (fs_info->fs_devices->seeding) {
seeding_dev = 1;
down_write(&sb->s_umount);
mutex_lock(&uuid_mutex);
@@ -2341,20 +2338,20 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
filemap_write_and_wait(bdev->bd_inode->i_mapping);
- devices = &root->fs_info->fs_devices->devices;
+ devices = &fs_info->fs_devices->devices;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
list_for_each_entry(device, devices, dev_list) {
if (device->bdev == bdev) {
ret = -EEXIST;
mutex_unlock(
- &root->fs_info->fs_devices->device_list_mutex);
+ &fs_info->fs_devices->device_list_mutex);
goto error;
}
}
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- device = btrfs_alloc_device(root->fs_info, NULL, NULL);
+ device = btrfs_alloc_device(fs_info, NULL, NULL);
if (IS_ERR(device)) {
/* we can safely leave the fs_devices entry around */
ret = PTR_ERR(device);
@@ -2382,13 +2379,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->can_discard = 1;
device->writeable = 1;
device->generation = trans->transid;
- device->io_width = root->sectorsize;
- device->io_align = root->sectorsize;
- device->sector_size = root->sectorsize;
+ device->io_width = fs_info->sectorsize;
+ device->io_align = fs_info->sectorsize;
+ device->sector_size = fs_info->sectorsize;
device->total_bytes = i_size_read(bdev->bd_inode);
device->disk_total_bytes = device->total_bytes;
device->commit_total_bytes = device->total_bytes;
- device->dev_root = root->fs_info->dev_root;
+ device->fs_info = fs_info;
device->bdev = bdev;
device->in_fs_metadata = 1;
device->is_tgtdev_for_dev_replace = 0;
@@ -2398,61 +2395,60 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
- ret = btrfs_prepare_sprout(root);
+ ret = btrfs_prepare_sprout(fs_info);
BUG_ON(ret); /* -ENOMEM */
}
- device->fs_devices = root->fs_info->fs_devices;
+ device->fs_devices = fs_info->fs_devices;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- lock_chunks(root);
- list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->chunk_mutex);
+ list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
- &root->fs_info->fs_devices->alloc_list);
- root->fs_info->fs_devices->num_devices++;
- root->fs_info->fs_devices->open_devices++;
- root->fs_info->fs_devices->rw_devices++;
- root->fs_info->fs_devices->total_devices++;
- root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
+ &fs_info->fs_devices->alloc_list);
+ fs_info->fs_devices->num_devices++;
+ fs_info->fs_devices->open_devices++;
+ fs_info->fs_devices->rw_devices++;
+ fs_info->fs_devices->total_devices++;
+ fs_info->fs_devices->total_rw_bytes += device->total_bytes;
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space += device->total_bytes;
- spin_unlock(&root->fs_info->free_chunk_lock);
+ spin_lock(&fs_info->free_chunk_lock);
+ fs_info->free_chunk_space += device->total_bytes;
+ spin_unlock(&fs_info->free_chunk_lock);
if (!blk_queue_nonrot(bdev_get_queue(bdev)))
- root->fs_info->fs_devices->rotating = 1;
+ fs_info->fs_devices->rotating = 1;
- tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
- btrfs_set_super_total_bytes(root->fs_info->super_copy,
+ tmp = btrfs_super_total_bytes(fs_info->super_copy);
+ btrfs_set_super_total_bytes(fs_info->super_copy,
tmp + device->total_bytes);
- tmp = btrfs_super_num_devices(root->fs_info->super_copy);
- btrfs_set_super_num_devices(root->fs_info->super_copy,
- tmp + 1);
+ tmp = btrfs_super_num_devices(fs_info->super_copy);
+ btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
/* add sysfs device entry */
- btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
+ btrfs_sysfs_add_device_link(fs_info->fs_devices, device);
/*
* we've got more storage, clear any full flags on the space
* infos
*/
- btrfs_clear_space_info_full(root->fs_info);
+ btrfs_clear_space_info_full(fs_info);
- unlock_chunks(root);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
if (seeding_dev) {
- lock_chunks(root);
- ret = init_first_rw_device(trans, root, device);
- unlock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
+ ret = init_first_rw_device(trans, fs_info, device);
+ mutex_unlock(&fs_info->chunk_mutex);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_trans;
}
}
- ret = btrfs_add_device(trans, root, device);
+ ret = btrfs_add_device(trans, fs_info, device);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_trans;
@@ -2461,7 +2457,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (seeding_dev) {
char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
- ret = btrfs_finish_sprout(trans, root);
+ ret = btrfs_finish_sprout(trans, fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_trans;
@@ -2471,16 +2467,15 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
* so rename the fsid on the sysfs
*/
snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
- root->fs_info->fsid);
- if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
- fsid_buf))
- btrfs_warn(root->fs_info,
- "sysfs: failed to create fsid for sprout");
+ fs_info->fsid);
+ if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf))
+ btrfs_warn(fs_info,
+ "sysfs: failed to create fsid for sprout");
}
- root->fs_info->num_tolerated_disk_barrier_failures =
- btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
- ret = btrfs_commit_transaction(trans, root);
+ fs_info->num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
+ ret = btrfs_commit_transaction(trans);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
@@ -2489,9 +2484,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (ret) /* transaction commit */
return ret;
- ret = btrfs_relocate_sys_chunks(root);
+ ret = btrfs_relocate_sys_chunks(fs_info);
if (ret < 0)
- btrfs_handle_fs_error(root->fs_info, ret,
+ btrfs_handle_fs_error(fs_info, ret,
"Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
@@ -2499,7 +2494,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
return 0;
return PTR_ERR(trans);
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
}
/* Update ctime/mtime for libblkid */
@@ -2507,9 +2502,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
return ret;
error_trans:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
rcu_string_free(device->name);
- btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
+ btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
kfree(device);
error:
blkdev_put(bdev, FMODE_EXCL);
@@ -2520,14 +2515,14 @@ error:
return ret;
}
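
Several hunks in btrfs_init_new_device() above swap the lock_chunks()/unlock_chunks() wrappers for direct mutex_lock()/mutex_unlock() calls on fs_info->chunk_mutex, making the lock visible at every call site. A user-space sketch of the before/after, with a pthread mutex standing in for the kernel mutex; struct and field names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct fs_info {
	pthread_mutex_t chunk_mutex;
	unsigned long long free_chunk_space;
};

/* old wrappers: they only hid which mutex the caller was taking */
static void lock_chunks(struct fs_info *fs_info)
{
	pthread_mutex_lock(&fs_info->chunk_mutex);
}

static void unlock_chunks(struct fs_info *fs_info)
{
	pthread_mutex_unlock(&fs_info->chunk_mutex);
}

int main(void)
{
	struct fs_info fs_info = {
		.chunk_mutex = PTHREAD_MUTEX_INITIALIZER,
		.free_chunk_space = 0,
	};

	/* before: the lock being taken is invisible at the call site */
	lock_chunks(&fs_info);
	fs_info.free_chunk_space += 4096;
	unlock_chunks(&fs_info);

	/* after: the caller names the mutex it holds */
	pthread_mutex_lock(&fs_info.chunk_mutex);
	fs_info.free_chunk_space += 4096;
	pthread_mutex_unlock(&fs_info.chunk_mutex);

	printf("free_chunk_space = %llu\n", fs_info.free_chunk_space);
	return 0;
}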
-int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
struct request_queue *q;
struct btrfs_device *device;
struct block_device *bdev;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *devices;
struct rcu_string *name;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
@@ -2585,19 +2580,19 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
q = bdev_get_queue(bdev);
if (blk_queue_discard(q))
device->can_discard = 1;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
device->writeable = 1;
device->generation = 0;
- device->io_width = root->sectorsize;
- device->io_align = root->sectorsize;
- device->sector_size = root->sectorsize;
+ device->io_width = fs_info->sectorsize;
+ device->io_align = fs_info->sectorsize;
+ device->sector_size = fs_info->sectorsize;
device->total_bytes = btrfs_device_get_total_bytes(srcdev);
device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
device->bytes_used = btrfs_device_get_bytes_used(srcdev);
ASSERT(list_empty(&srcdev->resized_list));
device->commit_total_bytes = srcdev->commit_total_bytes;
device->commit_bytes_used = device->bytes_used;
- device->dev_root = fs_info->dev_root;
+ device->fs_info = fs_info;
device->bdev = bdev;
device->in_fs_metadata = 1;
device->is_tgtdev_for_dev_replace = 1;
@@ -2608,7 +2603,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
list_add(&device->dev_list, &fs_info->fs_devices->devices);
fs_info->fs_devices->num_devices++;
fs_info->fs_devices->open_devices++;
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
*device_out = device;
return ret;
@@ -2621,11 +2616,13 @@ error:
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev)
{
+ u32 sectorsize = fs_info->sectorsize;
+
WARN_ON(fs_info->fs_devices->rw_devices == 0);
- tgtdev->io_width = fs_info->dev_root->sectorsize;
- tgtdev->io_align = fs_info->dev_root->sectorsize;
- tgtdev->sector_size = fs_info->dev_root->sectorsize;
- tgtdev->dev_root = fs_info->dev_root;
+ tgtdev->io_width = sectorsize;
+ tgtdev->io_align = sectorsize;
+ tgtdev->sector_size = sectorsize;
+ tgtdev->fs_info = fs_info;
tgtdev->in_fs_metadata = 1;
}
@@ -2634,13 +2631,11 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
{
int ret;
struct btrfs_path *path;
- struct btrfs_root *root;
+ struct btrfs_root *root = device->fs_info->chunk_root;
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
struct btrfs_key key;
- root = device->dev_root->fs_info->chunk_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2680,8 +2675,8 @@ out:
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size)
{
- struct btrfs_super_block *super_copy =
- device->dev_root->fs_info->super_copy;
+ struct btrfs_fs_info *fs_info = device->fs_info;
+ struct btrfs_super_block *super_copy = fs_info->super_copy;
struct btrfs_fs_devices *fs_devices;
u64 old_total;
u64 diff;
@@ -2689,41 +2684,41 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
if (!device->writeable)
return -EACCES;
- lock_chunks(device->dev_root);
+ mutex_lock(&fs_info->chunk_mutex);
old_total = btrfs_super_total_bytes(super_copy);
diff = new_size - device->total_bytes;
if (new_size <= device->total_bytes ||
device->is_tgtdev_for_dev_replace) {
- unlock_chunks(device->dev_root);
+ mutex_unlock(&fs_info->chunk_mutex);
return -EINVAL;
}
- fs_devices = device->dev_root->fs_info->fs_devices;
+ fs_devices = fs_info->fs_devices;
btrfs_set_super_total_bytes(super_copy, old_total + diff);
device->fs_devices->total_rw_bytes += diff;
btrfs_device_set_total_bytes(device, new_size);
btrfs_device_set_disk_total_bytes(device, new_size);
- btrfs_clear_space_info_full(device->dev_root->fs_info);
+ btrfs_clear_space_info_full(device->fs_info);
if (list_empty(&device->resized_list))
list_add_tail(&device->resized_list,
&fs_devices->resized_devices);
- unlock_chunks(device->dev_root);
+ mutex_unlock(&fs_info->chunk_mutex);
return btrfs_update_device(trans, device);
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 chunk_objectid,
+ struct btrfs_fs_info *fs_info, u64 chunk_objectid,
u64 chunk_offset)
{
+ struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
struct btrfs_key key;
- root = root->fs_info->chunk_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2736,25 +2731,25 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
if (ret < 0)
goto out;
else if (ret > 0) { /* Logic error or corruption */
- btrfs_handle_fs_error(root->fs_info, -ENOENT,
- "Failed lookup while freeing chunk.");
+ btrfs_handle_fs_error(fs_info, -ENOENT,
+ "Failed lookup while freeing chunk.");
ret = -ENOENT;
goto out;
}
ret = btrfs_del_item(trans, root, path);
if (ret < 0)
- btrfs_handle_fs_error(root->fs_info, ret,
- "Failed to delete chunk item.");
+ btrfs_handle_fs_error(fs_info, ret,
+ "Failed to delete chunk item.");
out:
btrfs_free_path(path);
return ret;
}
-static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
- chunk_offset)
+static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info,
+ u64 chunk_objectid, u64 chunk_offset)
{
- struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+ struct btrfs_super_block *super_copy = fs_info->super_copy;
struct btrfs_disk_key *disk_key;
struct btrfs_chunk *chunk;
u8 *ptr;
@@ -2765,7 +2760,7 @@ static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
u32 cur;
struct btrfs_key key;
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
array_size = btrfs_super_sys_array_size(super_copy);
ptr = super_copy->sys_chunk_array;
@@ -2795,25 +2790,22 @@ static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
cur += len;
}
}
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
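
btrfs_del_sys_chunk() above walks the packed records in super_copy->sys_chunk_array under chunk_mutex and deletes a match by sliding the array's tail down over it. A compacted user-space sketch of that delete-by-memmove; the fixed-size record here stands in for the variable-length (disk_key, chunk) pairs the kernel stores:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SYS_ARRAY_SIZE 64

struct record {
	uint64_t offset;	/* stands in for the chunk offset key */
	uint32_t payload;
};

static uint8_t sys_array[SYS_ARRAY_SIZE];
static uint32_t array_size;

static void add_record(uint64_t offset, uint32_t payload)
{
	struct record rec = { offset, payload };

	memcpy(sys_array + array_size, &rec, sizeof(rec));
	array_size += sizeof(rec);
}

static int del_record(uint64_t offset)
{
	uint32_t cur = 0;

	while (cur < array_size) {
		struct record rec;
		uint32_t len = sizeof(rec);

		memcpy(&rec, sys_array + cur, len);
		if (rec.offset == offset) {
			/* slide the rest of the array down over this record */
			memmove(sys_array + cur, sys_array + cur + len,
				array_size - cur - len);
			array_size -= len;
			return 0;
		}
		cur += len;
	}
	return -1;	/* not found; -ENOENT territory in the kernel */
}

int main(void)
{
	add_record(1024, 1);
	add_record(2048, 2);
	add_record(4096, 3);
	del_record(2048);
	printf("records left: %u\n",
	       array_size / (uint32_t)sizeof(struct record));
	return 0;
}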
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 chunk_offset)
+ struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
struct extent_map_tree *em_tree;
struct extent_map *em;
- struct btrfs_root *extent_root = root->fs_info->extent_root;
struct map_lookup *map;
u64 dev_extent_len = 0;
u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
int i, ret = 0;
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
- /* Just in case */
- root = root->fs_info->chunk_root;
- em_tree = &root->fs_info->mapping_tree.map_tree;
+ em_tree = &fs_info->mapping_tree.map_tree;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, 1);
@@ -2832,9 +2824,9 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
return -EINVAL;
}
map = em->map_lookup;
- lock_chunks(root->fs_info->chunk_root);
- check_system_chunk(trans, extent_root, map->type);
- unlock_chunks(root->fs_info->chunk_root);
+ mutex_lock(&fs_info->chunk_mutex);
+ check_system_chunk(trans, fs_info, map->type);
+ mutex_unlock(&fs_info->chunk_mutex);
/*
* Take the device list mutex to prevent races with the final phase of
@@ -2854,14 +2846,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
if (device->bytes_used > 0) {
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_bytes_used(device,
device->bytes_used - dev_extent_len);
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space += dev_extent_len;
- spin_unlock(&root->fs_info->free_chunk_lock);
- btrfs_clear_space_info_full(root->fs_info);
- unlock_chunks(root);
+ spin_lock(&fs_info->free_chunk_lock);
+ fs_info->free_chunk_space += dev_extent_len;
+ spin_unlock(&fs_info->free_chunk_lock);
+ btrfs_clear_space_info_full(fs_info);
+ mutex_unlock(&fs_info->chunk_mutex);
}
if (map->stripes[i].dev) {
@@ -2875,23 +2867,24 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
+ ret = btrfs_free_chunk(trans, fs_info, chunk_objectid, chunk_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
- trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
+ trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
+ ret = btrfs_del_sys_chunk(fs_info, chunk_objectid,
+ chunk_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
}
- ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
+ ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -2903,15 +2896,12 @@ out:
return ret;
}
-static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
+static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
- struct btrfs_root *extent_root;
+ struct btrfs_root *root = fs_info->chunk_root;
struct btrfs_trans_handle *trans;
int ret;
- root = root->fs_info->chunk_root;
- extent_root = root->fs_info->extent_root;
-
/*
* Prevent races with automatic removal of unused block groups.
* After we relocate and before we remove the chunk with offset
@@ -2924,16 +2914,16 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
* we release the path used to search the chunk/dev tree and before
* the current task acquires this mutex and calls us.
*/
- ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
+ ASSERT(mutex_is_locked(&fs_info->delete_unused_bgs_mutex));
- ret = btrfs_can_relocate(extent_root, chunk_offset);
+ ret = btrfs_can_relocate(fs_info, chunk_offset);
if (ret)
return -ENOSPC;
/* step one, relocate all the extents inside this chunk */
- btrfs_scrub_pause(root);
- ret = btrfs_relocate_block_group(extent_root, chunk_offset);
- btrfs_scrub_continue(root);
+ btrfs_scrub_pause(fs_info);
+ ret = btrfs_relocate_block_group(fs_info, chunk_offset);
+ btrfs_scrub_continue(fs_info);
if (ret)
return ret;
@@ -2949,14 +2939,14 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
* step two, delete the device extents and the
* chunk tree entries
*/
- ret = btrfs_remove_chunk(trans, root, chunk_offset);
- btrfs_end_transaction(trans, extent_root);
+ ret = btrfs_remove_chunk(trans, fs_info, chunk_offset);
+ btrfs_end_transaction(trans);
return ret;
}
-static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
+static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *chunk_root = root->fs_info->chunk_root;
+ struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_chunk *chunk;
@@ -2977,10 +2967,10 @@ again:
key.type = BTRFS_CHUNK_ITEM_KEY;
while (1) {
- mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0) {
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
}
BUG_ON(ret == 0); /* Corruption */
@@ -2988,7 +2978,7 @@ again:
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
if (ret)
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret < 0)
goto error;
if (ret > 0)
@@ -3003,14 +2993,13 @@ again:
btrfs_release_path(path);
if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = btrfs_relocate_chunk(chunk_root,
- found_key.offset);
+ ret = btrfs_relocate_chunk(fs_info, found_key.offset);
if (ret == -ENOSPC)
failed++;
else
BUG_ON(ret);
}
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (found_key.offset == 0)
break;
@@ -3029,9 +3018,10 @@ error:
return ret;
}
-static int insert_balance_item(struct btrfs_root *root,
+static int insert_balance_item(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl)
{
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
struct btrfs_balance_item *item;
struct btrfs_disk_balance_args disk_bargs;
@@ -3062,7 +3052,7 @@ static int insert_balance_item(struct btrfs_root *root,
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
- memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+ memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
btrfs_set_balance_data(leaf, item, &disk_bargs);
@@ -3076,14 +3066,15 @@ static int insert_balance_item(struct btrfs_root *root,
btrfs_mark_buffer_dirty(leaf);
out:
btrfs_free_path(path);
- err = btrfs_commit_transaction(trans, root);
+ err = btrfs_commit_transaction(trans);
if (err && !ret)
ret = err;
return ret;
}
-static int del_balance_item(struct btrfs_root *root)
+static int del_balance_item(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
struct btrfs_key key;
@@ -3114,7 +3105,7 @@ static int del_balance_item(struct btrfs_root *root)
ret = btrfs_del_item(trans, root, path);
out:
btrfs_free_path(path);
- err = btrfs_commit_transaction(trans, root);
+ err = btrfs_commit_transaction(trans);
if (err && !ret)
ret = err;
return ret;
@@ -3369,11 +3360,11 @@ static int chunk_soft_convert_filter(u64 chunk_type,
return 0;
}
-static int should_balance_chunk(struct btrfs_root *root,
+static int should_balance_chunk(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 chunk_offset)
{
- struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+ struct btrfs_balance_control *bctl = fs_info->balance_ctl;
struct btrfs_balance_args *bargs = NULL;
u64 chunk_type = btrfs_chunk_type(leaf, chunk);
@@ -3398,10 +3389,10 @@ static int should_balance_chunk(struct btrfs_root *root,
/* usage filter */
if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
- chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
+ chunk_usage_filter(fs_info, chunk_offset, bargs)) {
return 0;
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
- chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
+ chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
return 0;
}
@@ -3521,7 +3512,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
ret = btrfs_grow_device(trans, device, old_size);
if (ret) {
- btrfs_end_transaction(trans, dev_root);
+ btrfs_end_transaction(trans);
/* btrfs_grow_device never returns ret > 0 */
WARN_ON(ret > 0);
btrfs_info_in_rcu(fs_info,
@@ -3531,7 +3522,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
goto error;
}
- btrfs_end_transaction(trans, dev_root);
+ btrfs_end_transaction(trans);
}
/* step two, relocate all the chunks */
@@ -3606,7 +3597,7 @@ again:
spin_unlock(&fs_info->balance_lock);
}
- ret = should_balance_chunk(chunk_root, leaf, chunk,
+ ret = should_balance_chunk(fs_info, leaf, chunk,
found_key.offset);
btrfs_release_path(path);
@@ -3659,9 +3650,9 @@ again:
goto error;
}
- ret = btrfs_force_chunk_alloc(trans, chunk_root,
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
BTRFS_BLOCK_GROUP_DATA);
- btrfs_end_transaction(trans, chunk_root);
+ btrfs_end_transaction(trans);
if (ret < 0) {
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
@@ -3669,8 +3660,7 @@ again:
chunk_reserved = 1;
}
- ret = btrfs_relocate_chunk(chunk_root,
- found_key.offset);
+ ret = btrfs_relocate_chunk(fs_info, found_key.offset);
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
goto error;
@@ -3741,7 +3731,7 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
int ret;
unset_balance_control(fs_info);
- ret = del_balance_item(fs_info->tree_root);
+ ret = del_balance_item(fs_info);
if (ret)
btrfs_handle_fs_error(fs_info, ret, NULL);
@@ -3874,7 +3864,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
bctl->sys.target));
}
- ret = insert_balance_item(fs_info->tree_root, bctl);
+ ret = insert_balance_item(fs_info, bctl);
if (ret && ret != -EEXIST)
goto out;
@@ -4166,7 +4156,7 @@ static int btrfs_uuid_scan_kthread(void *data)
}
update_tree:
if (!btrfs_is_empty_uuid(root_item.uuid)) {
- ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
+ ret = btrfs_uuid_tree_add(trans, fs_info,
root_item.uuid,
BTRFS_UUID_KEY_SUBVOL,
key.objectid);
@@ -4178,7 +4168,7 @@ update_tree:
}
if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
- ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
+ ret = btrfs_uuid_tree_add(trans, fs_info,
root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
key.objectid);
@@ -4191,7 +4181,7 @@ update_tree:
skip:
if (trans) {
- ret = btrfs_end_transaction(trans, fs_info->uuid_root);
+ ret = btrfs_end_transaction(trans);
trans = NULL;
if (ret)
break;
@@ -4216,7 +4206,7 @@ skip:
out:
btrfs_free_path(path);
if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans, fs_info->uuid_root);
+ btrfs_end_transaction(trans);
if (ret)
btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
else
@@ -4310,13 +4300,13 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
if (IS_ERR(uuid_root)) {
ret = PTR_ERR(uuid_root);
btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans, tree_root);
+ btrfs_end_transaction(trans);
return ret;
}
fs_info->uuid_root = uuid_root;
- ret = btrfs_commit_transaction(trans, tree_root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@@ -4355,8 +4345,9 @@ int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
*/
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
+ struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
- struct btrfs_root *root = device->dev_root;
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
u64 length;
@@ -4368,7 +4359,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
bool checked_pending_chunks = false;
struct extent_buffer *l;
struct btrfs_key key;
- struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+ struct btrfs_super_block *super_copy = fs_info->super_copy;
u64 old_total = btrfs_super_total_bytes(super_copy);
u64 old_size = btrfs_device_get_total_bytes(device);
u64 diff = old_size - new_size;
@@ -4382,16 +4373,16 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
path->reada = READA_FORWARD;
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, new_size);
if (device->writeable) {
device->fs_devices->total_rw_bytes -= diff;
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space -= diff;
- spin_unlock(&root->fs_info->free_chunk_lock);
+ spin_lock(&fs_info->free_chunk_lock);
+ fs_info->free_chunk_space -= diff;
+ spin_unlock(&fs_info->free_chunk_lock);
}
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
again:
key.objectid = device->devid;
@@ -4399,16 +4390,16 @@ again:
key.type = BTRFS_DEV_EXTENT_KEY;
do {
- mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto done;
}
ret = btrfs_previous_item(root, path, 0, key.type);
if (ret)
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret < 0)
goto done;
if (ret) {
@@ -4422,7 +4413,7 @@ again:
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != device->devid) {
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
btrfs_release_path(path);
break;
}
@@ -4431,7 +4422,7 @@ again:
length = btrfs_dev_extent_length(l, dev_extent);
if (key.offset + length <= new_size) {
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
btrfs_release_path(path);
break;
}
@@ -4439,8 +4430,8 @@ again:
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
btrfs_release_path(path);
- ret = btrfs_relocate_chunk(root, chunk_offset);
- mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+ ret = btrfs_relocate_chunk(fs_info, chunk_offset);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
goto done;
if (ret == -ENOSPC)
@@ -4463,7 +4454,7 @@ again:
goto done;
}
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
/*
* We checked in the above loop all device extents that were already in
@@ -4483,11 +4474,11 @@ again:
if (contains_pending_extent(trans->transaction, device,
&start, len)) {
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
checked_pending_chunks = true;
failed = 0;
retried = false;
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
goto done;
goto again;
@@ -4497,44 +4488,44 @@ again:
btrfs_device_set_disk_total_bytes(device, new_size);
if (list_empty(&device->resized_list))
list_add_tail(&device->resized_list,
- &root->fs_info->fs_devices->resized_devices);
+ &fs_info->fs_devices->resized_devices);
WARN_ON(diff > old_total);
btrfs_set_super_total_bytes(super_copy, old_total - diff);
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
/* Now btrfs_update_device() will change the on-disk size. */
ret = btrfs_update_device(trans, device);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
done:
btrfs_free_path(path);
if (ret) {
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, old_size);
if (device->writeable)
device->fs_devices->total_rw_bytes += diff;
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space += diff;
- spin_unlock(&root->fs_info->free_chunk_lock);
- unlock_chunks(root);
+ spin_lock(&fs_info->free_chunk_lock);
+ fs_info->free_chunk_space += diff;
+ spin_unlock(&fs_info->free_chunk_lock);
+ mutex_unlock(&fs_info->chunk_mutex);
}
return ret;
}
-static int btrfs_add_system_chunk(struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
{
- struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+ struct btrfs_super_block *super_copy = fs_info->super_copy;
struct btrfs_disk_key disk_key;
u32 array_size;
u8 *ptr;
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
array_size = btrfs_super_sys_array_size(super_copy);
if (array_size + item_size + sizeof(disk_key)
> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
return -EFBIG;
}
@@ -4545,7 +4536,7 @@ static int btrfs_add_system_chunk(struct btrfs_root *root,
memcpy(ptr, chunk, item_size);
item_size += sizeof(disk_key);
btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
return 0;
}
@@ -4583,7 +4574,7 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
-#define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r) \
+#define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r->fs_info) \
- sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
@@ -4593,10 +4584,10 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
/ sizeof(struct btrfs_stripe) + 1)
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 start,
+ struct btrfs_fs_info *fs_info, u64 start,
u64 type)
{
- struct btrfs_fs_info *info = extent_root->fs_info;
+ struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
struct list_head *cur;
struct map_lookup *map = NULL;
@@ -4762,12 +4753,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (type & BTRFS_BLOCK_GROUP_RAID5) {
raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
- extent_root->stripesize);
+ info->stripesize);
data_stripes = num_stripes - 1;
}
if (type & BTRFS_BLOCK_GROUP_RAID6) {
raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
- extent_root->stripesize);
+ info->stripesize);
data_stripes = num_stripes - 2;
}
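
In the two branches above, a RAID5 full stripe reserves one member stripe for parity and RAID6 reserves two, so only num_stripes - 1 (or num_stripes - 2) stripes carry data. A quick check of that arithmetic, with an arbitrarily chosen stripe size:

#include <stdio.h>

/* usable data stripes per full stripe, as computed in the hunk above */
static int data_stripes(int num_stripes, int nparity)
{
	return num_stripes - nparity;
}

int main(void)
{
	const unsigned long long stripe_size = 64ULL * 1024 * 1024;
	int ndevs = 6;

	/* RAID5: one parity stripe; RAID6: two parity stripes */
	printf("raid5: %llu bytes of data per full stripe\n",
	       stripe_size * data_stripes(ndevs, 1));
	printf("raid6: %llu bytes of data per full stripe\n",
	       stripe_size * data_stripes(ndevs, 2));
	return 0;
}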
@@ -4812,7 +4803,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
j * stripe_size;
}
}
- map->sector_size = extent_root->sectorsize;
+ map->sector_size = info->sectorsize;
map->stripe_len = raid_stripe_len;
map->io_align = raid_stripe_len;
map->io_width = raid_stripe_len;
@@ -4821,7 +4812,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
num_bytes = stripe_size * data_stripes;
- trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
+ trace_btrfs_chunk_alloc(info, map, start, num_bytes);
em = alloc_extent_map();
if (!em) {
@@ -4837,7 +4828,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
em->block_len = em->len;
em->orig_block_len = stripe_size;
- em_tree = &extent_root->fs_info->mapping_tree.map_tree;
+ em_tree = &info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
if (!ret) {
@@ -4850,7 +4841,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
goto error;
}
- ret = btrfs_make_block_group(trans, extent_root, 0, type,
+ ret = btrfs_make_block_group(trans, info, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, num_bytes);
if (ret)
@@ -4861,13 +4852,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
}
- spin_lock(&extent_root->fs_info->free_chunk_lock);
- extent_root->fs_info->free_chunk_space -= (stripe_size *
- map->num_stripes);
- spin_unlock(&extent_root->fs_info->free_chunk_lock);
+ spin_lock(&info->free_chunk_lock);
+ info->free_chunk_space -= (stripe_size * map->num_stripes);
+ spin_unlock(&info->free_chunk_lock);
free_extent_map(em);
- check_raid56_incompat_flag(extent_root->fs_info, type);
+ check_raid56_incompat_flag(info, type);
kfree(devices_info);
return 0;
@@ -4889,11 +4879,12 @@ error:
}
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
+ struct btrfs_fs_info *fs_info,
u64 chunk_offset, u64 chunk_size)
{
+ struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_key key;
- struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
struct btrfs_device *device;
struct btrfs_chunk *chunk;
struct btrfs_stripe *stripe;
@@ -4906,20 +4897,19 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
int i = 0;
int ret = 0;
- em_tree = &extent_root->fs_info->mapping_tree.map_tree;
+ em_tree = &fs_info->mapping_tree.map_tree;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
read_unlock(&em_tree->lock);
if (!em) {
- btrfs_crit(extent_root->fs_info,
- "unable to find logical %Lu len %Lu",
+ btrfs_crit(fs_info, "unable to find logical %Lu len %Lu",
chunk_offset, chunk_size);
return -EINVAL;
}
if (em->start != chunk_offset || em->len != chunk_size) {
- btrfs_crit(extent_root->fs_info,
+ btrfs_crit(fs_info,
"found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
chunk_offset, chunk_size, em->start, em->len);
free_extent_map(em);
@@ -4943,7 +4933,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
* at any time during that final phase of the device replace operation
* (dev-replace.c:btrfs_dev_replace_finishing()).
*/
- mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;
@@ -4960,7 +4950,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
break;
}
if (ret) {
- mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
goto out;
}
@@ -4974,7 +4964,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
stripe++;
}
- mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_set_stack_chunk_length(chunk, chunk_size);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
@@ -4983,7 +4973,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
- btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
+ btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
@@ -4996,8 +4986,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
* TODO: Cleanup of inserted chunk root in case of
* failure.
*/
- ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
- item_size);
+ ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
}
out:
@@ -5014,36 +5003,34 @@ out:
* bootstrap process of adding storage to a seed btrfs.
*/
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 type)
+ struct btrfs_fs_info *fs_info, u64 type)
{
u64 chunk_offset;
- ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
- chunk_offset = find_next_chunk(extent_root->fs_info);
- return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
+ ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
+ chunk_offset = find_next_chunk(fs_info);
+ return __btrfs_alloc_chunk(trans, fs_info, chunk_offset, type);
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_root *extent_root = fs_info->extent_root;
u64 chunk_offset;
u64 sys_chunk_offset;
u64 alloc_profile;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
- ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
- alloc_profile);
+ ret = __btrfs_alloc_chunk(trans, fs_info, chunk_offset, alloc_profile);
if (ret)
return ret;
- sys_chunk_offset = find_next_chunk(root->fs_info);
+ sys_chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
- ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
+ ret = __btrfs_alloc_chunk(trans, fs_info, sys_chunk_offset,
alloc_profile);
return ret;
}
@@ -5066,11 +5053,11 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
return max_errors;
}
-int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
+int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
struct extent_map *em;
struct map_lookup *map;
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
int readonly = 0;
int miss_ndevs = 0;
int i;
@@ -5182,14 +5169,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return ret;
}
-unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical)
{
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
- unsigned long len = root->sectorsize;
+ unsigned long len = fs_info->sectorsize;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, len);
@@ -5329,7 +5316,8 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
kfree(bbio);
}
-static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
+static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
+ enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
int mirror_num, int need_raid_map)
@@ -5414,7 +5402,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
raid56_full_stripe_start *= full_stripe_len;
}
- if (op == REQ_OP_DISCARD) {
+ if (op == BTRFS_MAP_DISCARD) {
/* we don't discard raid56 yet */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = -EOPNOTSUPP;
@@ -5427,7 +5415,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
For other RAID types and for RAID[56] reads, just allow a single
stripe (on a single disk). */
if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
- (op == REQ_OP_WRITE)) {
+ (op == BTRFS_MAP_WRITE)) {
max_len = stripe_len * nr_data_stripes(map) -
(offset - raid56_full_stripe_start);
} else {
@@ -5452,8 +5440,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
btrfs_dev_replace_set_lock_blocking(dev_replace);
if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
- op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
- op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
+ op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
+ op != BTRFS_MAP_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
/*
* in dev-replace case, for repair case (that's the only
* case where the mirror is selected explicitly when
@@ -5474,7 +5462,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
int found = 0;
u64 physical_of_found = 0;
- ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
+ ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
logical, &tmp_length, &tmp_bbio, 0, 0);
if (ret) {
WARN_ON(tmp_bbio != NULL);
@@ -5484,7 +5472,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
tmp_num_stripes = tmp_bbio->num_stripes;
if (mirror_num > tmp_num_stripes) {
/*
- * REQ_GET_READ_MIRRORS does not contain this
+ * BTRFS_MAP_GET_READ_MIRRORS does not contain this
* mirror, that means that the requested area
* is not left of the left cursor
*/
@@ -5540,17 +5528,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
(offset + *length);
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- if (op == REQ_OP_DISCARD)
+ if (op == BTRFS_MAP_DISCARD)
num_stripes = min_t(u64, map->num_stripes,
stripe_nr_end - stripe_nr_orig);
stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
&stripe_index);
- if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
- op != REQ_GET_READ_MIRRORS)
+ if (op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
+ op != BTRFS_MAP_GET_READ_MIRRORS)
mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
- op == REQ_GET_READ_MIRRORS)
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD ||
+ op == BTRFS_MAP_GET_READ_MIRRORS)
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -5563,8 +5551,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
- op == REQ_GET_READ_MIRRORS) {
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD ||
+ op == BTRFS_MAP_GET_READ_MIRRORS) {
num_stripes = map->num_stripes;
} else if (mirror_num) {
stripe_index = mirror_num - 1;
@@ -5578,9 +5566,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
stripe_index *= map->sub_stripes;
- if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
num_stripes = map->sub_stripes;
- else if (op == REQ_OP_DISCARD)
+ else if (op == BTRFS_MAP_DISCARD)
num_stripes = min_t(u64, map->sub_stripes *
(stripe_nr_end - stripe_nr_orig),
map->num_stripes);
@@ -5598,7 +5586,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
if (need_raid_map &&
- (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
+ (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS ||
mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */
stripe_nr = div_u64(raid56_full_stripe_start,
@@ -5626,8 +5614,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
/* We distribute the parity blocks across stripes */
div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
&stripe_index);
- if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
- op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
+ if ((op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
+ op != BTRFS_MAP_GET_READ_MIRRORS) && mirror_num <= 1)
mirror_num = 1;
}
} else {
@@ -5650,9 +5638,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
num_alloc_stripes = num_stripes;
if (dev_replace_is_ongoing) {
- if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD)
num_alloc_stripes <<= 1;
- if (op == REQ_GET_READ_MIRRORS)
+ if (op == BTRFS_MAP_GET_READ_MIRRORS)
num_alloc_stripes++;
tgtdev_indexes = num_stripes;
}
@@ -5668,7 +5656,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
/* build raid_map */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
need_raid_map &&
- ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
+ ((op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS) ||
mirror_num > 1)) {
u64 tmp;
unsigned rot;
@@ -5693,7 +5681,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
RAID6_Q_STRIPE;
}
- if (op == REQ_OP_DISCARD) {
+ if (op == BTRFS_MAP_DISCARD) {
u32 factor = 0;
u32 sub_stripes = 0;
u64 stripes_per_dev = 0;
@@ -5773,7 +5761,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
}
}
- if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
max_errors = btrfs_chunk_max_errors(map);
if (bbio->raid_map)
@@ -5781,7 +5769,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
tgtdev_indexes = 0;
if (dev_replace_is_ongoing &&
- (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
+ (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD) &&
dev_replace->tgtdev != NULL) {
int index_where_to_add;
u64 srcdev_devid = dev_replace->srcdev->devid;
@@ -5816,7 +5804,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
}
}
num_stripes = index_where_to_add;
- } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
+ } else if (dev_replace_is_ongoing &&
+ op == BTRFS_MAP_GET_READ_MIRRORS &&
dev_replace->tgtdev != NULL) {
u64 srcdev_devid = dev_replace->srcdev->devid;
int index_srcdev = 0;
@@ -5888,7 +5877,7 @@ out:
return ret;
}
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num)
{
@@ -5897,7 +5886,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
}
/* For Scrub/replace */
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map)
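
The REQ_OP_* to BTRFS_MAP_* changes above stop reusing block-layer request opcodes as the op argument of the mapping routines; a dedicated enum btrfs_map_op can hold values such as GET_READ_MIRRORS that have no block-layer counterpart, and the compiler can warn on unhandled cases. A sketch of the resulting dispatch, with the stripe-count rule simplified from the RAID1/DUP branches above (BTRFS_MAP_READ is assumed as the enum's read value; it does not appear in this excerpt):

#include <stdio.h>

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

/* writes/discards fan out to every mirror; a plain read picks one */
static int num_stripes_for(enum btrfs_map_op op, int mirrors)
{
	switch (op) {
	case BTRFS_MAP_WRITE:
	case BTRFS_MAP_DISCARD:
	case BTRFS_MAP_GET_READ_MIRRORS:
		return mirrors;
	case BTRFS_MAP_READ:
		return 1;
	}
	return 1;
}

int main(void)
{
	printf("write -> %d stripes\n", num_stripes_for(BTRFS_MAP_WRITE, 2));
	printf("read  -> %d stripe\n", num_stripes_for(BTRFS_MAP_READ, 2));
	return 0;
}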
@@ -6069,10 +6058,10 @@ static void btrfs_end_bio(struct bio *bio)
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-static noinline void btrfs_schedule_bio(struct btrfs_root *root,
- struct btrfs_device *device,
+static noinline void btrfs_schedule_bio(struct btrfs_device *device,
struct bio *bio)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
@@ -6095,7 +6084,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
* made progress against dirty pages when we've really just put it
* on a queue for later
*/
- atomic_inc(&root->fs_info->nr_async_bios);
+ atomic_inc(&fs_info->nr_async_bios);
WARN_ON(bio->bi_next);
bio->bi_next = NULL;
@@ -6117,15 +6106,14 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
spin_unlock(&device->io_lock);
if (should_queue)
- btrfs_queue_work(root->fs_info->submit_workers,
- &device->work);
+ btrfs_queue_work(fs_info->submit_workers, &device->work);
}
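
btrfs_schedule_bio() above chains the bio onto the device's pending list under io_lock and skips waking the worker when one is already draining the list, so a single wakeup services a whole batch. A user-space sketch of that enqueue-and-maybe-wake shape (simplified: the kernel keeps separate sync and async lists, and the worker clears the running state itself):

#include <pthread.h>
#include <stdio.h>

struct bio {
	int id;
	struct bio *next;
};

struct device {
	pthread_mutex_t io_lock;	/* a spinlock in the kernel */
	struct bio *head, *tail;
	int running;			/* is a worker already draining? */
};

/* returns 1 when the caller should schedule the worker */
static int schedule_bio(struct device *dev, struct bio *bio)
{
	int should_queue;

	bio->next = NULL;
	pthread_mutex_lock(&dev->io_lock);
	if (dev->tail)
		dev->tail->next = bio;
	else
		dev->head = bio;
	dev->tail = bio;
	should_queue = !dev->running;
	dev->running = 1;	/* a real worker clears this when done */
	pthread_mutex_unlock(&dev->io_lock);
	return should_queue;
}

int main(void)
{
	struct device dev = { .io_lock = PTHREAD_MUTEX_INITIALIZER };
	struct bio a = { .id = 1 }, b = { .id = 2 };

	printf("first bio wakes worker: %d\n", schedule_bio(&dev, &a));
	printf("second bio piggybacks:  %d\n", schedule_bio(&dev, &b));
	return 0;
}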
-static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
- struct bio *bio, u64 physical, int dev_nr,
- int async)
+static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
+ u64 physical, int dev_nr, int async)
{
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
+ struct btrfs_fs_info *fs_info = bbio->fs_info;
bio->bi_private = bbio;
btrfs_io_bio(bio)->stripe_index = dev_nr;
@@ -6148,10 +6136,10 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
#endif
bio->bi_bdev = dev->bdev;
- btrfs_bio_counter_inc_noblocked(root->fs_info);
+ btrfs_bio_counter_inc_noblocked(fs_info);
if (async)
- btrfs_schedule_bio(root, dev, bio);
+ btrfs_schedule_bio(dev, bio);
else
btrfsic_submit_bio(bio);
}
@@ -6170,7 +6158,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
}
}
-int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
+int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit)
{
struct btrfs_device *dev;
@@ -6186,11 +6174,11 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
length = bio->bi_iter.bi_size;
map_length = length;
- btrfs_bio_counter_inc_blocked(root->fs_info);
- ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
+ btrfs_bio_counter_inc_blocked(fs_info);
+ ret = __btrfs_map_block(fs_info, bio_op(bio), logical,
&map_length, &bbio, mirror_num, 1);
if (ret) {
- btrfs_bio_counter_dec(root->fs_info);
+ btrfs_bio_counter_dec(fs_info);
return ret;
}
@@ -6198,7 +6186,7 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
bbio->orig_bio = first_bio;
bbio->private = first_bio->bi_private;
bbio->end_io = first_bio->bi_end_io;
- bbio->fs_info = root->fs_info;
+ bbio->fs_info = fs_info;
atomic_set(&bbio->stripes_pending, bbio->num_stripes);
if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
@@ -6206,18 +6194,19 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
if (bio_op(bio) == REQ_OP_WRITE) {
- ret = raid56_parity_write(root, bio, bbio, map_length);
+ ret = raid56_parity_write(fs_info, bio, bbio,
+ map_length);
} else {
- ret = raid56_parity_recover(root, bio, bbio, map_length,
- mirror_num, 1);
+ ret = raid56_parity_recover(fs_info, bio, bbio,
+ map_length, mirror_num, 1);
}
- btrfs_bio_counter_dec(root->fs_info);
+ btrfs_bio_counter_dec(fs_info);
return ret;
}
if (map_length < length) {
- btrfs_crit(root->fs_info,
+ btrfs_crit(fs_info,
"mapping failed logical %llu bio len %llu len %llu",
logical, length, map_length);
BUG();
@@ -6237,11 +6226,10 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
} else
bio = first_bio;
- submit_stripe_bio(root, bbio, bio,
- bbio->stripes[dev_nr].physical, dev_nr,
- async_submit);
+ submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
+ dev_nr, async_submit);
}
- btrfs_bio_counter_dec(root->fs_info);
+ btrfs_bio_counter_dec(fs_info);
return 0;
}
@@ -6265,8 +6253,7 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
return NULL;
}
-static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
- struct btrfs_fs_devices *fs_devices,
+static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
@@ -6337,7 +6324,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
}
/* Return -EIO if any error, otherwise return 0. */
-static int btrfs_check_chunk_valid(struct btrfs_root *root,
+static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 logical)
{
@@ -6354,33 +6341,31 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
type = btrfs_chunk_type(leaf, chunk);
if (!num_stripes) {
- btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
+ btrfs_err(fs_info, "invalid chunk num_stripes: %u",
num_stripes);
return -EIO;
}
- if (!IS_ALIGNED(logical, root->sectorsize)) {
- btrfs_err(root->fs_info,
- "invalid chunk logical %llu", logical);
+ if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
+ btrfs_err(fs_info, "invalid chunk logical %llu", logical);
return -EIO;
}
- if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
- btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+ if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
+ btrfs_err(fs_info, "invalid chunk sectorsize %u",
btrfs_chunk_sector_size(leaf, chunk));
return -EIO;
}
- if (!length || !IS_ALIGNED(length, root->sectorsize)) {
- btrfs_err(root->fs_info,
- "invalid chunk length %llu", length);
+ if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
+ btrfs_err(fs_info, "invalid chunk length %llu", length);
return -EIO;
}
if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
- btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
+ btrfs_err(fs_info, "invalid chunk stripe length: %llu",
stripe_len);
return -EIO;
}
if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
type) {
- btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
+ btrfs_err(fs_info, "unrecognized chunk type: %llu",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK) &
btrfs_chunk_type(leaf, chunk));
@@ -6393,7 +6378,7 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
(type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
num_stripes != 1)) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -6403,11 +6388,11 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
return 0;
}
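
btrfs_check_chunk_valid() above rejects chunks whose logical address or length is not aligned to fs_info->sectorsize and whose stripe length is not a power of two. The two bit tricks those checks rely on, in isolation:

#include <stdint.h>
#include <stdio.h>

/* IS_ALIGNED(x, a): the low bits below a power-of-two 'a' must be zero */
static int is_aligned(uint64_t x, uint64_t a)
{
	return (x & (a - 1)) == 0;
}

/* is_power_of_2(n): a power of two shares no set bits with n - 1 */
static int is_power_of_2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	uint64_t sectorsize = 4096, stripe_len = 65536;

	printf("logical 1052672 aligned: %d\n",
	       is_aligned(1052672, sectorsize));	/* 1: 257 * 4096 */
	printf("logical 1052673 aligned: %d\n",
	       is_aligned(1052673, sectorsize));	/* 0 */
	printf("stripe_len power of two: %d\n",
	       is_power_of_2(stripe_len));		/* 1 */
	return 0;
}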
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
u64 logical;
@@ -6424,7 +6409,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+ ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
@@ -6471,23 +6456,22 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
- map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
+ map->stripes[i].dev = btrfs_find_device(fs_info, devid,
uuid, NULL);
if (!map->stripes[i].dev &&
- !btrfs_test_opt(root->fs_info, DEGRADED)) {
+ !btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
return -EIO;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
- add_missing_dev(root, root->fs_info->fs_devices,
- devid, uuid);
+ add_missing_dev(fs_info->fs_devices, devid,
+ uuid);
if (!map->stripes[i].dev) {
free_extent_map(em);
return -EIO;
}
- btrfs_warn(root->fs_info,
- "devid %llu uuid %pU is missing",
+ btrfs_warn(fs_info, "devid %llu uuid %pU is missing",
devid, uuid);
}
map->stripes[i].dev->in_fs_metadata = 1;
@@ -6525,7 +6509,7 @@ static void fill_device_from_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
-static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
+static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
u8 *fsid)
{
struct btrfs_fs_devices *fs_devices;
@@ -6533,7 +6517,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
BUG_ON(!mutex_is_locked(&uuid_mutex));
- fs_devices = root->fs_info->fs_devices->seed;
+ fs_devices = fs_info->fs_devices->seed;
while (fs_devices) {
if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
return fs_devices;
@@ -6543,7 +6527,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
fs_devices = find_fsid(fsid);
if (!fs_devices) {
- if (!btrfs_test_opt(root->fs_info, DEGRADED))
+ if (!btrfs_test_opt(fs_info, DEGRADED))
return ERR_PTR(-ENOENT);
fs_devices = alloc_fs_devices(fsid);
@@ -6560,7 +6544,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
return fs_devices;
ret = __btrfs_open_devices(fs_devices, FMODE_READ,
- root->fs_info->bdev_holder);
+ fs_info->bdev_holder);
if (ret) {
free_fs_devices(fs_devices);
fs_devices = ERR_PTR(ret);
@@ -6574,17 +6558,17 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
goto out;
}
- fs_devices->seed = root->fs_info->fs_devices->seed;
- root->fs_info->fs_devices->seed = fs_devices;
+ fs_devices->seed = fs_info->fs_devices->seed;
+ fs_info->fs_devices->seed = fs_devices;
out:
return fs_devices;
}
-static int read_one_dev(struct btrfs_root *root,
+static int read_one_dev(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item)
{
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
u64 devid;
int ret;
@@ -6597,24 +6581,24 @@ static int read_one_dev(struct btrfs_root *root,
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
- if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
- fs_devices = open_seed_devices(root, fs_uuid);
+ if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
+ fs_devices = open_seed_devices(fs_info, fs_uuid);
if (IS_ERR(fs_devices))
return PTR_ERR(fs_devices);
}
- device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
if (!device) {
- if (!btrfs_test_opt(root->fs_info, DEGRADED))
+ if (!btrfs_test_opt(fs_info, DEGRADED))
return -EIO;
- device = add_missing_dev(root, fs_devices, devid, dev_uuid);
+ device = add_missing_dev(fs_devices, devid, dev_uuid);
if (!device)
return -ENOMEM;
- btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
+ btrfs_warn(fs_info, "devid %llu uuid %pU missing",
devid, dev_uuid);
} else {
- if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED))
+ if (!device->bdev && !btrfs_test_opt(fs_info, DEGRADED))
return -EIO;
if (!device->bdev && !device->missing) {
@@ -6643,7 +6627,7 @@ static int read_one_dev(struct btrfs_root *root,
}
}
- if (device->fs_devices != root->fs_info->fs_devices) {
+ if (device->fs_devices != fs_info->fs_devices) {
BUG_ON(device->writeable);
if (device->generation !=
btrfs_device_generation(leaf, dev_item))
@@ -6654,18 +6638,18 @@ static int read_one_dev(struct btrfs_root *root,
device->in_fs_metadata = 1;
if (device->writeable && !device->is_tgtdev_for_dev_replace) {
device->fs_devices->total_rw_bytes += device->total_bytes;
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space += device->total_bytes -
+ spin_lock(&fs_info->free_chunk_lock);
+ fs_info->free_chunk_space += device->total_bytes -
device->bytes_used;
- spin_unlock(&root->fs_info->free_chunk_lock);
+ spin_unlock(&fs_info->free_chunk_lock);
}
ret = 0;
return ret;
}
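
The accounting above shows the invariant this conversion keeps: fs_info->free_chunk_space is only adjusted under fs_info->free_chunk_lock. A minimal sketch of the pattern, assuming a device whose byte counters are already filled in (the helper name is invented):

/* illustrative sketch only */
static void account_device_example(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *device)
{
	spin_lock(&fs_info->free_chunk_lock);
	fs_info->free_chunk_space += device->total_bytes - device->bytes_used;
	spin_unlock(&fs_info->free_chunk_lock);
}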
-int btrfs_read_sys_array(struct btrfs_root *root)
+int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_super_block *super_copy = fs_info->super_copy;
struct extent_buffer *sb;
struct btrfs_disk_key *disk_key;
@@ -6680,13 +6664,13 @@ int btrfs_read_sys_array(struct btrfs_root *root)
u64 type;
struct btrfs_key key;
- ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
+ ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
/*
 * This will create an extent buffer of nodesize; the superblock size
 * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
 * overallocate, but we can keep it as-is since only the first page is
 * used.
 */
- sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
+ sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
if (IS_ERR(sb))
return PTR_ERR(sb);
set_extent_buffer_uptodate(sb);
@@ -6757,7 +6741,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
if (cur_offset + len > array_size)
goto out_short_read;
- ret = read_one_chunk(root, &key, sb, chunk);
+ ret = read_one_chunk(fs_info, &key, sb, chunk);
if (ret)
break;
} else {
@@ -6783,8 +6767,9 @@ out_short_read:
return -EIO;
}
-int btrfs_read_chunk_tree(struct btrfs_root *root)
+int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *root = fs_info->chunk_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -6793,14 +6778,12 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
int slot;
u64 total_dev = 0;
- root = root->fs_info->chunk_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
mutex_lock(&uuid_mutex);
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
/*
* Read all device items, and then all the chunk items. All
@@ -6830,14 +6813,14 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
struct btrfs_dev_item *dev_item;
dev_item = btrfs_item_ptr(leaf, slot,
struct btrfs_dev_item);
- ret = read_one_dev(root, leaf, dev_item);
+ ret = read_one_dev(fs_info, leaf, dev_item);
if (ret)
goto error;
total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
- ret = read_one_chunk(root, &found_key, leaf, chunk);
+ ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
if (ret)
goto error;
}
@@ -6848,26 +6831,26 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
* After loading the chunk tree we have all device information, so
* do another round of validation checks.
*/
- if (total_dev != root->fs_info->fs_devices->total_devices) {
- btrfs_err(root->fs_info,
+ if (total_dev != fs_info->fs_devices->total_devices) {
+ btrfs_err(fs_info,
"super_num_devices %llu mismatch with num_devices %llu found here",
- btrfs_super_num_devices(root->fs_info->super_copy),
+ btrfs_super_num_devices(fs_info->super_copy),
total_dev);
ret = -EINVAL;
goto error;
}
- if (btrfs_super_total_bytes(root->fs_info->super_copy) <
- root->fs_info->fs_devices->total_rw_bytes) {
- btrfs_err(root->fs_info,
+ if (btrfs_super_total_bytes(fs_info->super_copy) <
+ fs_info->fs_devices->total_rw_bytes) {
+ btrfs_err(fs_info,
"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
- btrfs_super_total_bytes(root->fs_info->super_copy),
- root->fs_info->fs_devices->total_rw_bytes);
+ btrfs_super_total_bytes(fs_info->super_copy),
+ fs_info->fs_devices->total_rw_bytes);
ret = -EINVAL;
goto error;
}
ret = 0;
error:
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&uuid_mutex);
btrfs_free_path(path);
@@ -6882,7 +6865,7 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
while (fs_devices) {
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list)
- device->dev_root = fs_info->dev_root;
+ device->fs_info = fs_info;
mutex_unlock(&fs_devices->device_list_mutex);
fs_devices = fs_devices->seed;
@@ -6959,9 +6942,10 @@ out:
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *dev_root,
+ struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *eb;
@@ -6977,7 +6961,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
BUG_ON(!path);
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
- btrfs_warn_in_rcu(dev_root->fs_info,
+ btrfs_warn_in_rcu(fs_info,
"error %d while searching for dev_stats item for device %s",
ret, rcu_str_deref(device->name));
goto out;
@@ -6988,7 +6972,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
/* need to delete old one and insert a new one */
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
- btrfs_warn_in_rcu(dev_root->fs_info,
+ btrfs_warn_in_rcu(fs_info,
"delete too small dev_stats item for device %s failed %d",
rcu_str_deref(device->name), ret);
goto out;
@@ -7002,7 +6986,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
- btrfs_warn_in_rcu(dev_root->fs_info,
+ btrfs_warn_in_rcu(fs_info,
"insert dev_stats item for device %s failed %d",
rcu_str_deref(device->name), ret);
goto out;
@@ -7027,7 +7011,6 @@ out:
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
- struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
int stats_cnt;
@@ -7039,7 +7022,7 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
continue;
stats_cnt = atomic_read(&device->dev_stats_ccnt);
- ret = update_dev_stat_item(trans, dev_root, device);
+ ret = update_dev_stat_item(trans, fs_info, device);
if (!ret)
atomic_sub(stats_cnt, &device->dev_stats_ccnt);
}
@@ -7058,7 +7041,7 @@ static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
if (!dev->dev_stats_valid)
return;
- btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
+ btrfs_err_rl_in_rcu(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
rcu_str_deref(dev->name),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -7078,7 +7061,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
if (i == BTRFS_DEV_STAT_VALUES_MAX)
return; /* all values == 0, suppress message */
- btrfs_info_in_rcu(dev->dev_root->fs_info,
+ btrfs_info_in_rcu(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
rcu_str_deref(dev->name),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -7088,24 +7071,22 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
-int btrfs_get_dev_stats(struct btrfs_root *root,
+int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats)
{
struct btrfs_device *dev;
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int i;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
- btrfs_warn(root->fs_info,
- "get dev_stats failed, device not found");
+ btrfs_warn(fs_info, "get dev_stats failed, device not found");
return -ENODEV;
} else if (!dev->dev_stats_valid) {
- btrfs_warn(root->fs_info,
- "get dev_stats failed, not yet valid");
+ btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
return -ENODEV;
} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
@@ -7168,18 +7149,18 @@ void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
return;
mutex_lock(&fs_devices->device_list_mutex);
- lock_chunks(fs_info->dev_root);
+ mutex_lock(&fs_info->chunk_mutex);
list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
resized_list) {
list_del_init(&curr->resized_list);
curr->commit_total_bytes = curr->disk_total_bytes;
}
- unlock_chunks(fs_info->dev_root);
+ mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_devices->device_list_mutex);
}
/* Must be invoked during the transaction commit */
-void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction)
{
struct extent_map *em;
@@ -7191,7 +7172,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
return;
/* In order to kick the device replace finish process */
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
list_for_each_entry(em, &transaction->pending_chunks, list) {
map = em->map_lookup;
@@ -7200,7 +7181,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
dev->commit_bytes_used = dev->bytes_used;
}
}
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
}
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index f137ffe6654c..24ba6bc3ec34 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -51,8 +51,7 @@ struct btrfs_device {
struct list_head dev_list;
struct list_head dev_alloc_list;
struct btrfs_fs_devices *fs_devices;
-
- struct btrfs_root *dev_root;
+ struct btrfs_fs_info *fs_info;
struct rcu_string *name;
@@ -371,27 +370,48 @@ struct btrfs_balance_control {
struct btrfs_balance_progress stat;
};
+enum btrfs_map_op {
+ BTRFS_MAP_READ,
+ BTRFS_MAP_WRITE,
+ BTRFS_MAP_DISCARD,
+ BTRFS_MAP_GET_READ_MIRRORS,
+};
+
+static inline enum btrfs_map_op btrfs_op(struct bio *bio)
+{
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ return BTRFS_MAP_DISCARD;
+ case REQ_OP_WRITE:
+ return BTRFS_MAP_WRITE;
+ default:
+ WARN_ON_ONCE(1);
+ case REQ_OP_READ:
+ return BTRFS_MAP_READ;
+ }
+}
+
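A hedged sketch of how a caller would combine the new helper with the retyped mapping API declared below; the wrapper function name is invented, but btrfs_op() and btrfs_map_block() are exactly as in this header:

/* illustrative sketch only */
static int map_bio_example(struct btrfs_fs_info *fs_info, struct bio *bio,
			   u64 logical, u64 *length,
			   struct btrfs_bio **bbio_ret)
{
	/* translate the bio's REQ_OP_* into the btrfs-level map op */
	enum btrfs_map_op op = btrfs_op(bio);

	return btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0);
}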
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length);
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num);
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len);
-int btrfs_read_sys_array(struct btrfs_root *root);
-int btrfs_read_chunk_tree(struct btrfs_root *root);
+int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
+int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 type);
+ struct btrfs_fs_info *fs_info, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
+int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
@@ -401,16 +421,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
struct btrfs_device *device, struct btrfs_device *this_dev);
-int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device);
-int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
+int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
char *devpath,
struct btrfs_device **device);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
const u8 *uuid);
-int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid);
+int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ char *device_path, u64 devid);
void btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
@@ -418,8 +439,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
-int btrfs_init_new_device(struct btrfs_root *root, char *path);
-int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path);
+int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out);
int btrfs_balance(struct btrfs_balance_control *bctl,
@@ -430,7 +452,7 @@ int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
-int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
struct btrfs_device *device, u64 num_bytes,
u64 search_start, u64 *start, u64 *max_avail);
@@ -438,7 +460,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
-int btrfs_get_dev_stats(struct btrfs_root *root,
+int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
@@ -455,14 +477,14 @@ void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
u64 logical, u64 len, int mirror_num);
-unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
+ struct btrfs_fs_info *fs_info,
u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 chunk_offset);
+ struct btrfs_fs_info *fs_info, u64 chunk_offset);
static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
{
@@ -509,19 +531,9 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
}
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
-void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction);
-static inline void lock_chunks(struct btrfs_root *root)
-{
- mutex_lock(&root->fs_info->chunk_mutex);
-}
-
-static inline void unlock_chunks(struct btrfs_root *root)
-{
- mutex_unlock(&root->fs_info->chunk_mutex);
-}
-
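With these helpers gone, call sites open-code the mutex, as seen earlier in this diff in btrfs_read_chunk_tree(), btrfs_update_commit_device_size() and btrfs_update_commit_device_bytes_used(). A minimal sketch of the resulting pattern (the function name is invented):

/* illustrative sketch only */
static void chunk_mutex_example(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->chunk_mutex);
	/* ... modify chunk mapping / device allocation state ... */
	mutex_unlock(&fs_info->chunk_mutex);
}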
struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index fccbf5567e78..9621c7f2503e 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -94,11 +94,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
{
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
size_t name_len = strlen(name);
int ret = 0;
- if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
+ if (name_len + size > BTRFS_MAX_XATTR_SIZE(fs_info))
return -ENOSPC;
path = btrfs_alloc_path();
@@ -149,14 +150,14 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
*/
ret = 0;
btrfs_assert_tree_locked(path->nodes[0]);
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (!di && !(flags & XATTR_REPLACE)) {
ret = -ENOSPC;
goto out;
}
} else if (ret == -EEXIST) {
ret = 0;
- di = btrfs_match_dir_item_name(root, path, name, name_len);
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
ASSERT(di); /* logic error */
} else if (ret) {
goto out;
@@ -185,7 +186,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
char *ptr;
if (size > old_data_len) {
- if (btrfs_leaf_free_space(root, leaf) <
+ if (btrfs_leaf_free_space(fs_info, leaf) <
(size - old_data_len)) {
ret = -ENOSPC;
goto out;
@@ -195,16 +196,17 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
- btrfs_extend_item(root, path,
+ btrfs_extend_item(fs_info, path,
size - old_data_len);
else if (size < old_data_len)
- btrfs_truncate_item(root, path, data_size, 1);
+ btrfs_truncate_item(fs_info, path,
+ data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
- btrfs_extend_item(root, path, data_size);
+ btrfs_extend_item(fs_info, path, data_size);
}
item = btrfs_item_nr(slot);
@@ -257,7 +259,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
out:
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
return ret;
}
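The one-line change above reflects the new transaction calling convention: the handle carries the needed context, so ending a transaction no longer takes a root. A minimal sketch assuming the usual start/end pairing (the function name is invented and error paths are elided):

/* illustrative sketch only */
static int transaction_example(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	/* ... tree modifications go here ... */
	return btrfs_end_transaction(trans);
}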
@@ -265,6 +267,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret = 0;
@@ -333,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1);
- if (verify_dir_item(root, leaf, di)) {
+ if (verify_dir_item(fs_info, leaf, di)) {
ret = -EIO;
goto err;
}
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 441b81a3e545..da497f184ff4 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -210,10 +210,9 @@ out:
return ret;
}
-static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
+static int zlib_decompress_bio(struct list_head *ws, struct page **pages_in,
u64 disk_start,
- struct bio_vec *bvec,
- int vcnt,
+ struct bio *orig_bio,
size_t srclen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -222,10 +221,8 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
char *data_in;
size_t total_out = 0;
unsigned long page_in_index = 0;
- unsigned long page_out_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
- unsigned long pg_offset;
data_in = kmap(pages_in[page_in_index]);
workspace->strm.next_in = data_in;
@@ -235,7 +232,6 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
workspace->strm.total_out = 0;
workspace->strm.next_out = workspace->buf;
workspace->strm.avail_out = PAGE_SIZE;
- pg_offset = 0;
/*
 * If it's deflate, and it's got no preset dictionary, then we can
 * tell zlib to skip the adler32 check.
 */
@@ -250,6 +246,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
pr_warn("BTRFS: inflateInit failed\n");
+ kunmap(pages_in[page_in_index]);
return -EIO;
}
while (workspace->strm.total_in < srclen) {
@@ -266,8 +263,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
total_out, disk_start,
- bvec, vcnt,
- &page_out_index, &pg_offset);
+ orig_bio);
if (ret2 == 0) {
ret = 0;
goto done;
@@ -300,7 +296,7 @@ done:
if (data_in)
kunmap(pages_in[page_in_index]);
if (!ret)
- btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
+ zero_fill_bio(orig_bio);
return ret;
}
@@ -407,6 +403,6 @@ const struct btrfs_compress_op btrfs_zlib_compress = {
.alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.compress_pages = zlib_compress_pages,
- .decompress_biovec = zlib_decompress_biovec,
+ .decompress_bio = zlib_decompress_bio,
.decompress = zlib_decompress,
};
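Since decompress_biovec is now decompress_bio across the compression ops, here is a hedged sketch of how the generic layer would dispatch through the table; the wrapper name is an assumption, but the hook's signature matches zlib_decompress_bio() above:

/* illustrative sketch only */
static int decompress_bio_example(const struct btrfs_compress_op *op,
				  struct list_head *workspace,
				  struct page **pages_in, u64 disk_start,
				  struct bio *orig_bio, size_t srclen)
{
	return op->decompress_bio(workspace, pages_in, disk_start,
				  orig_bio, srclen);
}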
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index e030d6f6c19a..c14bed4ab097 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -698,10 +698,10 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
- TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
- TP_ARGS(root, map, offset, size),
+ TP_ARGS(fs_info, map, offset, size),
TP_STRUCT__entry_btrfs(
__field( int, num_stripes )
@@ -712,13 +712,13 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__field( u64, root_objectid )
),
- TP_fast_assign_btrfs(root->fs_info,
+ TP_fast_assign_btrfs(fs_info,
__entry->num_stripes = map->num_stripes;
__entry->type = map->type;
__entry->sub_stripes = map->sub_stripes;
__entry->offset = offset;
__entry->size = size;
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = fs_info->chunk_root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
@@ -732,18 +732,18 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
- TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
- TP_ARGS(root, map, offset, size)
+ TP_ARGS(fs_info, map, offset, size)
);
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
- TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
- TP_ARGS(root, map, offset, size)
+ TP_ARGS(fs_info, map, offset, size)
);
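A hedged call-site sketch for the converted event class; the variable names are assumptions, but the argument order follows TP_PROTO above:

/* illustrative sketch only */
trace_btrfs_chunk_alloc(fs_info, map, chunk_offset, chunk_size);
trace_btrfs_chunk_free(fs_info, map, chunk_offset, chunk_size);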
TRACE_EVENT(btrfs_cow_block,
@@ -891,65 +891,61 @@ TRACE_EVENT(btrfs_flush_space,
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
- TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
- TP_ARGS(root, start, len),
+ TP_ARGS(fs_info, start, len),
TP_STRUCT__entry_btrfs(
- __field( u64, root_objectid )
__field( u64, start )
__field( u64, len )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
__entry->start = start;
__entry->len = len;
),
TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
- show_root_type(__entry->root_objectid),
+ show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
);
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
- TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
- TP_ARGS(root, start, len)
+ TP_ARGS(fs_info, start, len)
);
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
- TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
- TP_ARGS(root, start, len)
+ TP_ARGS(fs_info, start, len)
);
TRACE_EVENT(find_free_extent,
- TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
u64 data),
- TP_ARGS(root, num_bytes, empty_size, data),
+ TP_ARGS(fs_info, num_bytes, empty_size, data),
TP_STRUCT__entry_btrfs(
- __field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
__field( u64, data )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
__entry->num_bytes = num_bytes;
__entry->empty_size = empty_size;
__entry->data = data;
),
- TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
- "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+ TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+ show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
BTRFS_GROUP_FLAGS))
@@ -957,22 +953,20 @@ TRACE_EVENT(find_free_extent,
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(struct btrfs_root *root,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(root, block_group, start, len),
+ TP_ARGS(fs_info, block_group, start, len),
TP_STRUCT__entry_btrfs(
- __field( u64, root_objectid )
__field( u64, bg_objectid )
__field( u64, flags )
__field( u64, start )
__field( u64, len )
),
- TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ TP_fast_assign_btrfs(fs_info,
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
__entry->start = start;
@@ -981,7 +975,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
"start = %Lu, len = %Lu",
- show_root_type(__entry->root_objectid), __entry->bg_objectid,
+ show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
+ __entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
"|", BTRFS_GROUP_FLAGS),
__entry->start, __entry->len)
@@ -989,20 +984,20 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(struct btrfs_root *root,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(root, block_group, start, len)
+ TP_ARGS(fs_info, block_group, start, len)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(struct btrfs_root *root,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
- TP_ARGS(root, block_group, start, len)
+ TP_ARGS(fs_info, block_group, start, len)
);
TRACE_EVENT(btrfs_find_cluster,
@@ -1406,7 +1401,7 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_ARGS(fs_info, rec)
);
-DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_extent_record *rec),