Diffstat (limited to 'fs')
423 files changed, 18300 insertions, 15534 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index f08fbbfafd9a..d1ad3935fb85 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -166,7 +166,7 @@ config TMPFS
 	  space. If you unmount a tmpfs instance, everything stored therein is
 	  lost.
 
-	  See <file:Documentation/filesystems/tmpfs.txt> for details.
+	  See <file:Documentation/filesystems/tmpfs.rst> for details.
 
 config TMPFS_POSIX_ACL
 	bool "Tmpfs POSIX Access Control Lists"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 62dc4f577ba1..04f86b8c100e 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -36,6 +36,12 @@ config COMPAT_BINFMT_ELF
 config ARCH_BINFMT_ELF_STATE
 	bool
 
+config ARCH_HAVE_ELF_PROT
+	bool
+
+config ARCH_USE_GNU_PROPERTY
+	bool
+
 config BINFMT_ELF_FDPIC
 	bool "Kernel support for FDPIC ELF binaries"
 	default y if !BINFMT_ELF
@@ -72,7 +78,7 @@ config CORE_DUMP_DEFAULT_ELF_HEADERS
 
 	  The core dump behavior can be controlled per process using
 	  the /proc/PID/coredump_filter pseudo-file; this setting is
-	  inherited. See Documentation/filesystems/proc.txt for details.
+	  inherited. See Documentation/filesystems/proc.rst for details.
 
 	  This config option changes the default setting of coredump_filter
 	  seen at boot time. If unsure, say Y.
diff --git a/fs/adfs/Kconfig b/fs/adfs/Kconfig
index df4650dccf68..44738fed6625 100644
--- a/fs/adfs/Kconfig
+++ b/fs/adfs/Kconfig
@@ -12,7 +12,7 @@ config ADFS_FS
 
 	  The ADFS partition should be the first partition (i.e.,
 	  /dev/[hs]d?1) on each of your drives. Please read the file
-	  <file:Documentation/filesystems/adfs.txt> for further details.
+	  <file:Documentation/filesystems/adfs.rst> for further details.
 
 	  To compile this code as a module, choose M here: the module will be
 	  called adfs.
diff --git a/fs/affs/Kconfig b/fs/affs/Kconfig
index 84c46b9025c5..eb9d0ab850cb 100644
--- a/fs/affs/Kconfig
+++ b/fs/affs/Kconfig
@@ -9,7 +9,7 @@ config AFFS_FS
 	  FFS partition on your hard drive. Amiga floppies however cannot be
 	  read with this driver due to an incompatibility of the floppy
 	  controller used in an Amiga and the standard floppy controller in
-	  PCs and workstations. Read <file:Documentation/filesystems/affs.txt>
+	  PCs and workstations. Read <file:Documentation/filesystems/affs.rst>
 	  and <file:fs/affs/Changes>.
 
 	  With this driver you can also mount disk files used by Bernd
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
index 3fb1f559e317..1ad211d72b3b 100644
--- a/fs/afs/Kconfig
+++ b/fs/afs/Kconfig
@@ -8,7 +8,7 @@ config AFS_FS
 	  If you say Y here, you will get an experimental Andrew File System
 	  driver. It currently only supports unsecured read-only AFS access.
 
-	  See <file:Documentation/filesystems/afs.txt> for more information.
+	  See <file:Documentation/filesystems/afs.rst> for more information.
 
 	  If unsure, say N.
 
@@ -18,7 +18,7 @@ config AFS_DEBUG
 	help
 	  Say Y here to make runtime controllable debugging messages appear.
 
-	  See <file:Documentation/filesystems/afs.txt> for more information.
+	  See <file:Documentation/filesystems/afs.rst> for more information.
 
 	  If unsure, say N.
 
@@ -37,6 +37,6 @@ config AFS_DEBUG_CURSOR
 	  the dmesg log if the server rotation algorithm fails to successfully
 	  contact a server.
 
-	  See <file:Documentation/filesystems/afs.txt> for more information.
+	  See <file:Documentation/filesystems/afs.rst> for more information.
 
 	  If unsure, say N.
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 10359bea7070..75c4e4043d1d 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -18,6 +18,7 @@ kafs-y := \
 	file.o \
 	flock.o \
 	fsclient.o \
+	fs_operation.o \
 	fs_probe.o \
 	inode.o \
 	main.o \
@@ -30,6 +31,7 @@ kafs-y := \
 	server_list.o \
 	super.o \
 	vlclient.o \
+	vl_alias.o \
 	vl_list.o \
 	vl_probe.o \
 	vl_rotate.o \
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b6d49d646ade..432cb4b23961 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
 
 #include <linux/in.h>
 
-#define AFS_MAXCELLNAME		64	/* Maximum length of a cell name */
+#define AFS_MAXCELLNAME		256	/* Maximum length of a cell name */
 #define AFS_MAXVOLNAME		64	/* Maximum length of a volume name */
 #define AFS_MAXNSERVERS		8	/* Maximum servers in a basic volume record */
 #define AFS_NMAXNSERVERS	13	/* Maximum servers in a N/U-class volume record */
@@ -146,7 +146,6 @@ struct afs_file_status {
 struct afs_status_cb {
 	struct afs_file_status	status;
 	struct afs_callback	callback;
-	unsigned int		cb_break;	/* Pre-op callback break counter */
 	bool			have_status;	/* True if status record was retrieved */
 	bool			have_cb;	/* True if cb record was retrieved */
 	bool			have_error;	/* True if status.abort_code indicates an error */
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index e9b8029920ec..9c65ffb8a523 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -22,6 +22,7 @@ enum AFSVL_Operations {
 	VLGETENTRYBYNAMEU	= 527,		/* AFS Get VLDB entry by name (UUID-variant) */
 	VLGETADDRSU		= 533,		/* AFS Get addrs for fileserver */
 	YVLGETENDPOINTS		= 64002,	/* YFS Get endpoints for file/volume server */
+	YVLGETCELLNAME		= 64014,	/* YFS Get actual cell name */
 	VLGETCAPABILITIES	= 65537,	/* AFS Get server capabilities */
 };
 
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 2dca8df1a18d..7d9b23d981bf 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -21,192 +21,17 @@
 #include "internal.h"
 
 /*
- * Create volume and callback interests on a server.
- */
-static struct afs_cb_interest *afs_create_interest(struct afs_server *server,
-						   struct afs_vnode *vnode)
-{
-	struct afs_vol_interest *new_vi, *vi;
-	struct afs_cb_interest *new;
-	struct hlist_node **pp;
-
-	new_vi = kzalloc(sizeof(struct afs_vol_interest), GFP_KERNEL);
-	if (!new_vi)
-		return NULL;
-
-	new = kzalloc(sizeof(struct afs_cb_interest), GFP_KERNEL);
-	if (!new) {
-		kfree(new_vi);
-		return NULL;
-	}
-
-	new_vi->usage = 1;
-	new_vi->vid = vnode->volume->vid;
-	INIT_HLIST_NODE(&new_vi->srv_link);
-	INIT_HLIST_HEAD(&new_vi->cb_interests);
-
-	refcount_set(&new->usage, 1);
-	new->sb = vnode->vfs_inode.i_sb;
-	new->vid = vnode->volume->vid;
-	new->server = afs_get_server(server, afs_server_trace_get_new_cbi);
-	INIT_HLIST_NODE(&new->cb_vlink);
-
-	write_lock(&server->cb_break_lock);
-
-	for (pp = &server->cb_volumes.first; *pp; pp = &(*pp)->next) {
-		vi = hlist_entry(*pp, struct afs_vol_interest, srv_link);
-		if (vi->vid < new_vi->vid)
-			continue;
-		if (vi->vid > new_vi->vid)
-			break;
-		vi->usage++;
-		goto found_vi;
-	}
-
-	new_vi->srv_link.pprev = pp;
-	new_vi->srv_link.next = *pp;
-	if (*pp)
-		(*pp)->pprev = &new_vi->srv_link.next;
-	*pp = &new_vi->srv_link;
-	vi = new_vi;
-	new_vi = NULL;
-found_vi:
-
-	new->vol_interest = vi;
-	hlist_add_head(&new->cb_vlink, &vi->cb_interests);
-
-	write_unlock(&server->cb_break_lock);
-	kfree(new_vi);
-	return new;
-}
-
-/*
- * Set up an interest-in-callbacks record for a volume on a server and
- * register it with the server.
- * - Called with vnode->io_lock held.
- */ -int afs_register_server_cb_interest(struct afs_vnode *vnode, - struct afs_server_list *slist, - unsigned int index) -{ - struct afs_server_entry *entry = &slist->servers[index]; - struct afs_cb_interest *cbi, *vcbi, *new, *old; - struct afs_server *server = entry->server; - -again: - vcbi = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->io_lock)); - if (vcbi && likely(vcbi == entry->cb_interest)) - return 0; - - read_lock(&slist->lock); - cbi = afs_get_cb_interest(entry->cb_interest); - read_unlock(&slist->lock); - - if (vcbi) { - if (vcbi == cbi) { - afs_put_cb_interest(afs_v2net(vnode), cbi); - return 0; - } - - /* Use a new interest in the server list for the same server - * rather than an old one that's still attached to a vnode. - */ - if (cbi && vcbi->server == cbi->server) { - write_seqlock(&vnode->cb_lock); - old = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->cb_lock.lock)); - rcu_assign_pointer(vnode->cb_interest, cbi); - write_sequnlock(&vnode->cb_lock); - afs_put_cb_interest(afs_v2net(vnode), old); - return 0; - } - - /* Re-use the one attached to the vnode. */ - if (!cbi && vcbi->server == server) { - write_lock(&slist->lock); - if (entry->cb_interest) { - write_unlock(&slist->lock); - afs_put_cb_interest(afs_v2net(vnode), cbi); - goto again; - } - - entry->cb_interest = cbi; - write_unlock(&slist->lock); - return 0; - } - } - - if (!cbi) { - new = afs_create_interest(server, vnode); - if (!new) - return -ENOMEM; - - write_lock(&slist->lock); - if (!entry->cb_interest) { - entry->cb_interest = afs_get_cb_interest(new); - cbi = new; - new = NULL; - } else { - cbi = afs_get_cb_interest(entry->cb_interest); - } - write_unlock(&slist->lock); - afs_put_cb_interest(afs_v2net(vnode), new); - } - - ASSERT(cbi); - - /* Change the server the vnode is using. This entails scrubbing any - * interest the vnode had in the previous server it was using. - */ - write_seqlock(&vnode->cb_lock); - - old = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->cb_lock.lock)); - rcu_assign_pointer(vnode->cb_interest, cbi); - vnode->cb_s_break = cbi->server->cb_s_break; - vnode->cb_v_break = vnode->volume->cb_v_break; - clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); - - write_sequnlock(&vnode->cb_lock); - afs_put_cb_interest(afs_v2net(vnode), old); - return 0; -} - -/* - * Remove an interest on a server. - */ -void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi) -{ - struct afs_vol_interest *vi; - - if (cbi && refcount_dec_and_test(&cbi->usage)) { - if (!hlist_unhashed(&cbi->cb_vlink)) { - write_lock(&cbi->server->cb_break_lock); - - hlist_del_init(&cbi->cb_vlink); - vi = cbi->vol_interest; - cbi->vol_interest = NULL; - if (--vi->usage == 0) - hlist_del(&vi->srv_link); - else - vi = NULL; - - write_unlock(&cbi->server->cb_break_lock); - if (vi) - kfree_rcu(vi, rcu); - afs_put_server(net, cbi->server, afs_server_trace_put_cbi); - } - kfree_rcu(cbi, rcu); - } -} - -/* - * allow the fileserver to request callback state (re-)initialisation + * Allow the fileserver to request callback state (re-)initialisation. + * Unfortunately, UUIDs are not guaranteed unique. 
*/ void afs_init_callback_state(struct afs_server *server) { - server->cb_s_break++; + rcu_read_lock(); + do { + server->cb_s_break++; + server = rcu_dereference(server->uuid_next); + } while (0); + rcu_read_unlock(); } /* @@ -238,69 +63,109 @@ void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason } /* + * Look up a volume by volume ID under RCU conditions. + */ +static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell, + afs_volid_t vid) +{ + struct afs_volume *volume = NULL; + struct rb_node *p; + int seq = 0; + + do { + /* Unfortunately, rbtree walking doesn't give reliable results + * under just the RCU read lock, so we have to check for + * changes. + */ + read_seqbegin_or_lock(&cell->volume_lock, &seq); + + p = rcu_dereference_raw(cell->volumes.rb_node); + while (p) { + volume = rb_entry(p, struct afs_volume, cell_node); + + if (volume->vid < vid) + p = rcu_dereference_raw(p->rb_left); + else if (volume->vid > vid) + p = rcu_dereference_raw(p->rb_right); + else + break; + volume = NULL; + } + + } while (need_seqretry(&cell->volume_lock, seq)); + + done_seqretry(&cell->volume_lock, seq); + return volume; +} + +/* * allow the fileserver to explicitly break one callback * - happens when * - the backing file is changed * - a lock is released */ -static void afs_break_one_callback(struct afs_server *server, +static void afs_break_one_callback(struct afs_volume *volume, struct afs_fid *fid) { - struct afs_vol_interest *vi; - struct afs_cb_interest *cbi; - struct afs_iget_data data; + struct super_block *sb; struct afs_vnode *vnode; struct inode *inode; - read_lock(&server->cb_break_lock); - hlist_for_each_entry(vi, &server->cb_volumes, srv_link) { - if (vi->vid < fid->vid) - continue; - if (vi->vid > fid->vid) { - vi = NULL; - break; - } - //atomic_inc(&vi->usage); - break; + if (fid->vnode == 0 && fid->unique == 0) { + /* The callback break applies to an entire volume. */ + write_lock(&volume->cb_v_break_lock); + volume->cb_v_break++; + trace_afs_cb_break(fid, volume->cb_v_break, + afs_cb_break_for_volume_callback, false); + write_unlock(&volume->cb_v_break_lock); + return; } - /* TODO: Find all matching volumes if we couldn't match the server and - * break them anyway. + /* See if we can find a matching inode - even an I_NEW inode needs to + * be marked as it can have its callback broken before we finish + * setting up the local inode. */ - if (!vi) - goto out; + sb = rcu_dereference(volume->sb); + if (!sb) + return; + + inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid); + if (inode) { + vnode = AFS_FS_I(inode); + afs_break_callback(vnode, afs_cb_break_for_callback); + } else { + trace_afs_cb_miss(fid, afs_cb_break_for_callback); + } +} + +static void afs_break_some_callbacks(struct afs_server *server, + struct afs_callback_break *cbb, + size_t *_count) +{ + struct afs_callback_break *residue = cbb; + struct afs_volume *volume; + afs_volid_t vid = cbb->fid.vid; + size_t i; - /* Step through all interested superblocks. There may be more than one - * because of cell aliasing. + volume = afs_lookup_volume_rcu(server->cell, vid); + + /* TODO: Find all matching volumes if we couldn't match the server and + * break them anyway. */ - hlist_for_each_entry(cbi, &vi->cb_interests, cb_vlink) { - if (fid->vnode == 0 && fid->unique == 0) { - /* The callback break applies to an entire volume. 
*/ - struct afs_super_info *as = AFS_FS_S(cbi->sb); - struct afs_volume *volume = as->volume; - write_lock(&volume->cb_v_break_lock); - volume->cb_v_break++; - trace_afs_cb_break(fid, volume->cb_v_break, - afs_cb_break_for_volume_callback, false); - write_unlock(&volume->cb_v_break_lock); + for (i = *_count; i > 0; cbb++, i--) { + if (cbb->fid.vid == vid) { + _debug("- Fid { vl=%08llx n=%llu u=%u }", + cbb->fid.vid, + cbb->fid.vnode, + cbb->fid.unique); + --*_count; + if (volume) + afs_break_one_callback(volume, &cbb->fid); } else { - data.volume = NULL; - data.fid = *fid; - inode = ilookup5_nowait(cbi->sb, fid->vnode, - afs_iget5_test, &data); - if (inode) { - vnode = AFS_FS_I(inode); - afs_break_callback(vnode, afs_cb_break_for_callback); - iput(inode); - } else { - trace_afs_cb_miss(fid, afs_cb_break_for_callback); - } + *residue++ = *cbb; } } - -out: - read_unlock(&server->cb_break_lock); } /* @@ -313,29 +178,11 @@ void afs_break_callbacks(struct afs_server *server, size_t count, ASSERT(server != NULL); - /* TODO: Sort the callback break list by volume ID */ + rcu_read_lock(); - for (; count > 0; callbacks++, count--) { - _debug("- Fid { vl=%08llx n=%llu u=%u }", - callbacks->fid.vid, - callbacks->fid.vnode, - callbacks->fid.unique); - afs_break_one_callback(server, &callbacks->fid); - } + while (count > 0) + afs_break_some_callbacks(server, callbacks, &count); - _leave(""); + rcu_read_unlock(); return; } - -/* - * Clear the callback interests in a server list. - */ -void afs_clear_callback_interests(struct afs_net *net, struct afs_server_list *slist) -{ - int i; - - for (i = 0; i < slist->nr_servers; i++) { - afs_put_cb_interest(net, slist->servers[i].cb_interest); - slist->servers[i].cb_interest = NULL; - } -} diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 78ba5f932287..005921e3b38d 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -161,9 +161,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, atomic_set(&cell->usage, 2); INIT_WORK(&cell->manager, afs_manage_cell); - INIT_LIST_HEAD(&cell->proc_volumes); - rwlock_init(&cell->proc_lock); + cell->volumes = RB_ROOT; + INIT_HLIST_HEAD(&cell->proc_volumes); + seqlock_init(&cell->volume_lock); + cell->fs_servers = RB_ROOT; + seqlock_init(&cell->fs_lock); rwlock_init(&cell->vl_servers_lock); + cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS); /* Provide a VL server list, filling it in if we were given a list of * addresses to use. @@ -481,7 +485,9 @@ static void afs_cell_destroy(struct rcu_head *rcu) ASSERTCMP(atomic_read(&cell->usage), ==, 0); + afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root); afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); + afs_put_cell(cell->net, cell->alias_of); key_put(cell->anonymous_key); kfree(cell); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 380ad5ace7cf..bef413818af7 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -118,8 +118,6 @@ bool afs_cm_incoming_call(struct afs_call *call) { _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID); - call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall); - switch (call->operation_ID) { case CBCallBack: call->type = &afs_SRXCBCallBack; @@ -150,49 +148,6 @@ bool afs_cm_incoming_call(struct afs_call *call) } /* - * Record a probe to the cache manager from a server. 
- */ -static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server) -{ - _enter(""); - - if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) && - !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) { - if (server->cm_epoch == call->epoch) - return 0; - - if (!server->probe.said_rebooted) { - pr_notice("kAFS: FS rebooted %pU\n", &server->uuid); - server->probe.said_rebooted = true; - } - } - - spin_lock(&server->probe_lock); - - if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) { - server->cm_epoch = call->epoch; - server->probe.cm_epoch = call->epoch; - goto out; - } - - if (server->probe.cm_probed && - call->epoch != server->probe.cm_epoch && - !server->probe.said_inconsistent) { - pr_notice("kAFS: FS endpoints inconsistent %pU\n", - &server->uuid); - server->probe.said_inconsistent = true; - } - - if (!server->probe.cm_probed || call->epoch == server->cm_epoch) - server->probe.cm_epoch = server->cm_epoch; - -out: - server->probe.cm_probed = true; - spin_unlock(&server->probe_lock); - return 0; -} - -/* * Find the server record by peer address and record a probe to the cache * manager from a server. */ @@ -210,7 +165,7 @@ static int afs_find_cm_server_by_peer(struct afs_call *call) } call->server = server; - return afs_record_cm_probe(call, server); + return 0; } /* @@ -231,7 +186,7 @@ static int afs_find_cm_server_by_uuid(struct afs_call *call, } call->server = server; - return afs_record_cm_probe(call, server); + return 0; } /* @@ -268,7 +223,9 @@ static void SRXAFSCB_CallBack(struct work_struct *work) * to maintain cache coherency. */ if (call->server) { - trace_afs_server(call->server, atomic_read(&call->server->usage), + trace_afs_server(call->server, + atomic_read(&call->server->ref), + atomic_read(&call->server->active), afs_server_trace_callback); afs_break_callbacks(call->server, call->count, call->request); } @@ -305,8 +262,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); if (call->count > AFSCBMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_cb_fid_count); + return afs_protocol_error(call, afs_eproto_cb_fid_count); call->buffer = kmalloc(array3_size(call->count, 3, 4), GFP_KERNEL); @@ -351,8 +307,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->count2 = ntohl(call->tmp); _debug("CB count: %u", call->count2); if (call->count2 != call->count && call->count2 != 0) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_cb_count); + return afs_protocol_error(call, afs_eproto_cb_count); call->iter = &call->def_iter; iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4); call->unmarshall++; @@ -509,7 +464,8 @@ static int afs_deliver_cb_probe(struct afs_call *call) } /* - * allow the fileserver to quickly find out if the fileserver has been rebooted + * Allow the fileserver to quickly find out if the cache manager has been + * rebooted. 
*/ static void SRXAFSCB_ProbeUuid(struct work_struct *work) { @@ -581,7 +537,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); - return afs_find_cm_server_by_uuid(call, call->request); + return afs_find_cm_server_by_peer(call); } /* @@ -672,8 +628,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); if (call->count > YFSCBMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_cb_fid_count); + return afs_protocol_error(call, afs_eproto_cb_fid_count); size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid)); call->buffer = kmalloc(size, GFP_KERNEL); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index d1e1caa23c8b..25cbe0aeeec5 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -99,8 +99,6 @@ struct afs_lookup_cookie { bool found; bool one_only; unsigned short nr_fids; - struct inode **inodes; - struct afs_status_cb *statuses; struct afs_fid fids[50]; }; @@ -618,8 +616,8 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name, } } else if (cookie->name.len == nlen && memcmp(cookie->name.name, name, nlen) == 0) { - cookie->fids[0].vnode = ino; - cookie->fids[0].unique = dtype; + cookie->fids[1].vnode = ino; + cookie->fids[1].unique = dtype; cookie->found = 1; if (cookie->one_only) return -1; @@ -631,6 +629,111 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name, } /* + * Deal with the result of a successful lookup operation. Turn all the files + * into inodes and save the first one - which is the one we actually want. + */ +static void afs_do_lookup_success(struct afs_operation *op) +{ + struct afs_vnode_param *vp; + struct afs_vnode *vnode; + struct inode *inode; + u32 abort_code; + int i; + + _enter(""); + + for (i = 0; i < op->nr_files; i++) { + switch (i) { + case 0: + vp = &op->file[0]; + abort_code = vp->scb.status.abort_code; + if (abort_code != 0) { + op->abort_code = abort_code; + op->error = afs_abort_to_error(abort_code); + } + break; + + case 1: + vp = &op->file[1]; + break; + + default: + vp = &op->more_files[i - 2]; + break; + } + + if (!vp->scb.have_status && !vp->scb.have_error) + continue; + + _debug("do [%u]", i); + if (vp->vnode) { + if (!test_bit(AFS_VNODE_UNSET, &vp->vnode->flags)) + afs_vnode_commit_status(op, vp); + } else if (vp->scb.status.abort_code == 0) { + inode = afs_iget(op, vp); + if (!IS_ERR(inode)) { + vnode = AFS_FS_I(inode); + afs_cache_permit(vnode, op->key, + 0 /* Assume vnode->cb_break is 0 */ + + op->cb_v_break, + &vp->scb); + vp->vnode = vnode; + vp->put_vnode = true; + } + } else { + _debug("- abort %d %llx:%llx.%x", + vp->scb.status.abort_code, + vp->fid.vid, vp->fid.vnode, vp->fid.unique); + } + } + + _leave(""); +} + +static const struct afs_operation_ops afs_inline_bulk_status_operation = { + .issue_afs_rpc = afs_fs_inline_bulk_status, + .issue_yfs_rpc = yfs_fs_inline_bulk_status, + .success = afs_do_lookup_success, +}; + +static const struct afs_operation_ops afs_fetch_status_operation = { + .issue_afs_rpc = afs_fs_fetch_status, + .issue_yfs_rpc = yfs_fs_fetch_status, + .success = afs_do_lookup_success, +}; + +/* + * See if we know that the server we expect to use doesn't support + * FS.InlineBulkStatus. 
+ */ +static bool afs_server_supports_ibulk(struct afs_vnode *dvnode) +{ + struct afs_server_list *slist; + struct afs_volume *volume = dvnode->volume; + struct afs_server *server; + bool ret = true; + int i; + + if (!test_bit(AFS_VOLUME_MAYBE_NO_IBULK, &volume->flags)) + return true; + + rcu_read_lock(); + slist = rcu_dereference(volume->servers); + + for (i = 0; i < slist->nr_servers; i++) { + server = slist->servers[i].server; + if (server == dvnode->cb_server) { + if (test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags)) + ret = false; + break; + } + } + + rcu_read_unlock(); + return ret; +} + +/* * Do a lookup in a directory. We make use of bulk lookup to query a slew of * files in one go and create inodes for them. The inode of the file we were * asked for is returned. @@ -639,16 +742,13 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, struct key *key) { struct afs_lookup_cookie *cookie; - struct afs_cb_interest *dcbi, *cbi = NULL; - struct afs_super_info *as = dir->i_sb->s_fs_info; - struct afs_status_cb *scb; - struct afs_iget_data iget_data; - struct afs_fs_cursor fc; - struct afs_server *server; + struct afs_vnode_param *vp; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; struct inode *inode = NULL, *ti; afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); - int ret, i; + long ret; + int i; _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); @@ -656,72 +756,74 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, if (!cookie) return ERR_PTR(-ENOMEM); + for (i = 0; i < ARRAY_SIZE(cookie->fids); i++) + cookie->fids[i].vid = dvnode->fid.vid; cookie->ctx.actor = afs_lookup_filldir; cookie->name = dentry->d_name; - cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */ - - read_seqlock_excl(&dvnode->cb_lock); - dcbi = rcu_dereference_protected(dvnode->cb_interest, - lockdep_is_held(&dvnode->cb_lock.lock)); - if (dcbi) { - server = dcbi->server; - if (server && - test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags)) - cookie->one_only = true; - } - read_sequnlock_excl(&dvnode->cb_lock); + cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want + * and slot 1 for the directory */ - for (i = 0; i < 50; i++) - cookie->fids[i].vid = as->volume->vid; + if (!afs_server_supports_ibulk(dvnode)) + cookie->one_only = true; /* search the directory */ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version); - if (ret < 0) { - inode = ERR_PTR(ret); + if (ret < 0) goto out; - } dentry->d_fsdata = (void *)(unsigned long)data_version; - inode = ERR_PTR(-ENOENT); + ret = -ENOENT; if (!cookie->found) goto out; /* Check to see if we already have an inode for the primary fid. */ - iget_data.fid = cookie->fids[0]; - iget_data.volume = dvnode->volume; - iget_data.cb_v_break = dvnode->volume->cb_v_break; - iget_data.cb_s_break = 0; - inode = ilookup5(dir->i_sb, cookie->fids[0].vnode, - afs_iget5_test, &iget_data); + inode = ilookup5(dir->i_sb, cookie->fids[1].vnode, + afs_ilookup5_test_by_fid, &cookie->fids[1]); if (inode) - goto out; + goto out; /* We do */ - /* Need space for examining all the selected files */ - inode = ERR_PTR(-ENOMEM); - cookie->statuses = kvcalloc(cookie->nr_fids, sizeof(struct afs_status_cb), - GFP_KERNEL); - if (!cookie->statuses) + /* Okay, we didn't find it. We need to query the server - and whilst + * we're doing that, we're going to attempt to look up a bunch of other + * vnodes also. 
+ */ + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) { + ret = PTR_ERR(op); goto out; + } - cookie->inodes = kcalloc(cookie->nr_fids, sizeof(struct inode *), - GFP_KERNEL); - if (!cookie->inodes) - goto out_s; + afs_op_set_vnode(op, 0, dvnode); + afs_op_set_fid(op, 1, &cookie->fids[1]); - for (i = 1; i < cookie->nr_fids; i++) { - scb = &cookie->statuses[i]; + op->nr_files = cookie->nr_fids; + _debug("nr_files %u", op->nr_files); - /* Find any inodes that already exist and get their - * callback counters. - */ - iget_data.fid = cookie->fids[i]; - ti = ilookup5_nowait(dir->i_sb, iget_data.fid.vnode, - afs_iget5_test, &iget_data); - if (!IS_ERR_OR_NULL(ti)) { - vnode = AFS_FS_I(ti); - scb->cb_break = afs_calc_vnode_cb_break(vnode); - cookie->inodes[i] = ti; + /* Need space for examining all the selected files */ + op->error = -ENOMEM; + if (op->nr_files > 2) { + op->more_files = kvcalloc(op->nr_files - 2, + sizeof(struct afs_vnode_param), + GFP_KERNEL); + if (!op->more_files) + goto out_op; + + for (i = 2; i < op->nr_files; i++) { + vp = &op->more_files[i - 2]; + vp->fid = cookie->fids[i]; + + /* Find any inodes that already exist and get their + * callback counters. + */ + ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode, + afs_ilookup5_test_by_fid, &vp->fid); + if (!IS_ERR_OR_NULL(ti)) { + vnode = AFS_FS_I(ti); + vp->dv_before = vnode->status.data_version; + vp->cb_break_before = afs_calc_vnode_cb_break(vnode); + vp->vnode = vnode; + vp->put_vnode = true; + } } } @@ -729,120 +831,40 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, * lookups contained therein are stored in the reply without aborting * the whole operation. */ - if (cookie->one_only) - goto no_inline_bulk_status; - - inode = ERR_PTR(-ERESTARTSYS); - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - while (afs_select_fileserver(&fc)) { - if (test_bit(AFS_SERVER_FL_NO_IBULK, - &fc.cbi->server->flags)) { - fc.ac.abort_code = RX_INVALID_OPERATION; - fc.ac.error = -ECONNABORTED; - break; - } - iget_data.cb_v_break = dvnode->volume->cb_v_break; - iget_data.cb_s_break = fc.cbi->server->cb_s_break; - afs_fs_inline_bulk_status(&fc, - afs_v2net(dvnode), - cookie->fids, - cookie->statuses, - cookie->nr_fids, NULL); - } - - if (fc.ac.error == 0) - cbi = afs_get_cb_interest(fc.cbi); - if (fc.ac.abort_code == RX_INVALID_OPERATION) - set_bit(AFS_SERVER_FL_NO_IBULK, &fc.cbi->server->flags); - inode = ERR_PTR(afs_end_vnode_operation(&fc)); + op->error = -ENOTSUPP; + if (!cookie->one_only) { + op->ops = &afs_inline_bulk_status_operation; + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); } - if (!IS_ERR(inode)) - goto success; - if (fc.ac.abort_code != RX_INVALID_OPERATION) - goto out_c; - -no_inline_bulk_status: - /* We could try FS.BulkStatus next, but this aborts the entire op if - * any of the lookups fails - so, for the moment, revert to - * FS.FetchStatus for just the primary fid. 
- */ - inode = ERR_PTR(-ERESTARTSYS); - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - while (afs_select_fileserver(&fc)) { - iget_data.cb_v_break = dvnode->volume->cb_v_break; - iget_data.cb_s_break = fc.cbi->server->cb_s_break; - scb = &cookie->statuses[0]; - afs_fs_fetch_status(&fc, - afs_v2net(dvnode), - cookie->fids, - scb, - NULL); - } - - if (fc.ac.error == 0) - cbi = afs_get_cb_interest(fc.cbi); - inode = ERR_PTR(afs_end_vnode_operation(&fc)); + if (op->error == -ENOTSUPP) { + /* We could try FS.BulkStatus next, but this aborts the entire + * op if any of the lookups fails - so, for the moment, revert + * to FS.FetchStatus for op->file[1]. + */ + op->fetch_status.which = 1; + op->ops = &afs_fetch_status_operation; + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); } + inode = ERR_PTR(op->error); - if (IS_ERR(inode)) - goto out_c; - -success: - /* Turn all the files into inodes and save the first one - which is the - * one we actually want. - */ - scb = &cookie->statuses[0]; - if (scb->status.abort_code != 0) - inode = ERR_PTR(afs_abort_to_error(scb->status.abort_code)); - - for (i = 0; i < cookie->nr_fids; i++) { - struct afs_status_cb *scb = &cookie->statuses[i]; - - if (!scb->have_status && !scb->have_error) - continue; - - if (cookie->inodes[i]) { - struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]); - - if (test_bit(AFS_VNODE_UNSET, &iv->flags)) - continue; - - afs_vnode_commit_status(&fc, iv, - scb->cb_break, NULL, scb); - continue; - } - - if (scb->status.abort_code != 0) - continue; - - iget_data.fid = cookie->fids[i]; - ti = afs_iget(dir->i_sb, key, &iget_data, scb, cbi, dvnode); - if (!IS_ERR(ti)) - afs_cache_permit(AFS_FS_I(ti), key, - 0 /* Assume vnode->cb_break is 0 */ + - iget_data.cb_v_break, - scb); - if (i == 0) { - inode = ti; - } else { - if (!IS_ERR(ti)) - iput(ti); - } +out_op: + if (op->error == 0) { + inode = &op->file[1].vnode->vfs_inode; + op->file[1].vnode = NULL; } -out_c: - afs_put_cb_interest(afs_v2net(dvnode), cbi); - if (cookie->inodes) { - for (i = 0; i < cookie->nr_fids; i++) - iput(cookie->inodes[i]); - kfree(cookie->inodes); - } -out_s: - kvfree(cookie->statuses); + if (op->file[0].scb.have_status) + dentry->d_fsdata = (void *)(unsigned long)op->file[0].scb.status.data_version; + else + dentry->d_fsdata = (void *)(unsigned long)op->file[0].dv_before; + ret = afs_put_operation(op); out: kfree(cookie); - return inode; + _leave(""); + return inode ?: ERR_PTR(ret); } /* @@ -958,6 +980,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, if (!IS_ERR_OR_NULL(inode)) fid = AFS_FS_I(inode)->fid; + _debug("splice %px", dentry->d_inode); d = d_splice_alias(inode, dentry); if (!IS_ERR_OR_NULL(d)) { d->d_fsdata = dentry->d_fsdata; @@ -965,6 +988,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, } else { trace_afs_lookup(dvnode, &dentry->d_name, &fid); } + _leave(""); return d; } @@ -1215,130 +1239,97 @@ void afs_d_release(struct dentry *dentry) /* * Create a new inode for create/mkdir/symlink */ -static void afs_vnode_new_inode(struct afs_fs_cursor *fc, - struct dentry *new_dentry, - struct afs_iget_data *new_data, - struct afs_status_cb *new_scb) +static void afs_vnode_new_inode(struct afs_operation *op) { + struct afs_vnode_param *vp = &op->file[1]; struct afs_vnode *vnode; struct inode *inode; - if (fc->ac.error < 0) - return; + _enter(""); + + ASSERTCMP(op->error, ==, 0); - inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key, - new_data, new_scb, fc->cbi, fc->vnode); + inode = 
afs_iget(op, vp); if (IS_ERR(inode)) { /* ENOMEM or EINTR at a really inconvenient time - just abandon * the new directory on the server. */ - fc->ac.error = PTR_ERR(inode); + op->error = PTR_ERR(inode); return; } vnode = AFS_FS_I(inode); set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); - if (fc->ac.error == 0) - afs_cache_permit(vnode, fc->key, vnode->cb_break, new_scb); - d_instantiate(new_dentry, inode); + if (!op->error) + afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb); + d_instantiate(op->dentry, inode); } -static void afs_prep_for_new_inode(struct afs_fs_cursor *fc, - struct afs_iget_data *iget_data) +static void afs_create_success(struct afs_operation *op) { - iget_data->volume = fc->vnode->volume; - iget_data->cb_v_break = fc->vnode->volume->cb_v_break; - iget_data->cb_s_break = fc->cbi->server->cb_s_break; + _enter("op=%08x", op->debug_id); + afs_check_for_remote_deletion(op, op->file[0].vnode); + afs_vnode_commit_status(op, &op->file[0]); + afs_update_dentry_version(op, &op->file[0], op->dentry); + afs_vnode_new_inode(op); } -/* - * Note that a dentry got changed. We need to set d_fsdata to the data version - * number derived from the result of the operation. It doesn't matter if - * d_fsdata goes backwards as we'll just revalidate. - */ -static void afs_update_dentry_version(struct afs_fs_cursor *fc, - struct dentry *dentry, - struct afs_status_cb *scb) +static void afs_create_edit_dir(struct afs_operation *op) +{ + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; + struct afs_vnode *dvnode = dvp->vnode; + + _enter("op=%08x", op->debug_id); + + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) + afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid, + op->create.reason); + up_write(&dvnode->validate_lock); +} + +static void afs_create_put(struct afs_operation *op) { - if (fc->ac.error == 0) - dentry->d_fsdata = - (void *)(unsigned long)scb->status.data_version; + _enter("op=%08x", op->debug_id); + + if (op->error) + d_drop(op->dentry); } +static const struct afs_operation_ops afs_mkdir_operation = { + .issue_afs_rpc = afs_fs_make_dir, + .issue_yfs_rpc = yfs_fs_make_dir, + .success = afs_create_success, + .edit_dir = afs_create_edit_dir, + .put = afs_create_put, +}; + /* * create a directory on an AFS filesystem */ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { - struct afs_iget_data iget_data; - struct afs_status_cb *scb; - struct afs_fs_cursor fc; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); - struct key *key; - afs_dataversion_t data_version; - int ret; - - mode |= S_IFDIR; _enter("{%llx:%llu},{%pd},%ho", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); - ret = -ENOMEM; - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - goto error; - - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) { + d_drop(dentry); + return PTR_ERR(op); } - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - data_version = dvnode->status.data_version + 1; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_prep_for_new_inode(&fc, &iget_data); - afs_fs_create(&fc, dentry->d_name.name, mode, - &scb[0], &iget_data.fid, &scb[1]); - } - - afs_check_for_remote_deletion(&fc, 
dvnode); - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &data_version, &scb[0]); - afs_update_dentry_version(&fc, dentry, &scb[0]); - afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); - ret = afs_end_vnode_operation(&fc); - if (ret < 0) - goto error_key; - } else { - goto error_key; - } - - if (ret == 0) { - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == data_version) - afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, - afs_edit_dir_for_create); - up_write(&dvnode->validate_lock); - } - - key_put(key); - kfree(scb); - _leave(" = 0"); - return 0; - -error_key: - key_put(key); -error_scb: - kfree(scb); -error: - d_drop(dentry); - _leave(" = %d", ret); - return ret; + afs_op_set_vnode(op, 0, dvnode); + op->file[0].dv_delta = 1; + op->dentry = dentry; + op->create.mode = S_IFDIR | mode; + op->create.reason = afs_edit_dir_for_mkdir; + op->ops = &afs_mkdir_operation; + return afs_do_sync_operation(op); } /* @@ -1356,76 +1347,86 @@ static void afs_dir_remove_subdir(struct dentry *dentry) } } +static void afs_rmdir_success(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + afs_check_for_remote_deletion(op, op->file[0].vnode); + afs_vnode_commit_status(op, &op->file[0]); + afs_update_dentry_version(op, &op->file[0], op->dentry); +} + +static void afs_rmdir_edit_dir(struct afs_operation *op) +{ + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode *dvnode = dvp->vnode; + + _enter("op=%08x", op->debug_id); + afs_dir_remove_subdir(op->dentry); + + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) + afs_edit_dir_remove(dvnode, &op->dentry->d_name, + afs_edit_dir_for_rmdir); + up_write(&dvnode->validate_lock); +} + +static void afs_rmdir_put(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + if (op->file[1].vnode) + up_write(&op->file[1].vnode->rmdir_lock); +} + +static const struct afs_operation_ops afs_rmdir_operation = { + .issue_afs_rpc = afs_fs_remove_dir, + .issue_yfs_rpc = yfs_fs_remove_dir, + .success = afs_rmdir_success, + .edit_dir = afs_rmdir_edit_dir, + .put = afs_rmdir_put, +}; + /* * remove a directory from an AFS filesystem */ static int afs_rmdir(struct inode *dir, struct dentry *dentry) { - struct afs_status_cb *scb; - struct afs_fs_cursor fc; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL; - struct key *key; - afs_dataversion_t data_version; int ret; _enter("{%llx:%llu},{%pd}", dvnode->fid.vid, dvnode->fid.vnode, dentry); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error; - } + afs_op_set_vnode(op, 0, dvnode); + op->file[0].dv_delta = 1; + + op->dentry = dentry; + op->ops = &afs_rmdir_operation; /* Try to make sure we have a callback promise on the victim. 
*/ if (d_really_is_positive(dentry)) { vnode = AFS_FS_I(d_inode(dentry)); - ret = afs_validate(vnode, key); + ret = afs_validate(vnode, op->key); if (ret < 0) - goto error_key; + goto error; } if (vnode) { ret = down_write_killable(&vnode->rmdir_lock); if (ret < 0) - goto error_key; + goto error; + op->file[1].vnode = vnode; } - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - data_version = dvnode->status.data_version + 1; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_fs_remove(&fc, vnode, dentry->d_name.name, true, scb); - } - - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &data_version, scb); - afs_update_dentry_version(&fc, dentry, scb); - ret = afs_end_vnode_operation(&fc); - if (ret == 0) { - afs_dir_remove_subdir(dentry); - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == data_version) - afs_edit_dir_remove(dvnode, &dentry->d_name, - afs_edit_dir_for_rmdir); - up_write(&dvnode->validate_lock); - } - } + return afs_do_sync_operation(op); - if (vnode) - up_write(&vnode->rmdir_lock); -error_key: - key_put(key); error: - kfree(scb); - return ret; + return afs_put_operation(op); } /* @@ -1438,52 +1439,90 @@ error: * However, if we didn't have a callback promise outstanding, or it was * outstanding on a different server, then it won't break it either... */ -static int afs_dir_remove_link(struct afs_vnode *dvnode, struct dentry *dentry, - struct key *key) +static void afs_dir_remove_link(struct afs_operation *op) { - int ret = 0; + struct afs_vnode *dvnode = op->file[0].vnode; + struct afs_vnode *vnode = op->file[1].vnode; + struct dentry *dentry = op->dentry; + int ret; - if (d_really_is_positive(dentry)) { - struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); + if (op->error != 0 || + (op->file[1].scb.have_status && op->file[1].scb.have_error)) + return; + if (d_really_is_positive(dentry)) + return; - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { - /* Already done */ - } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { - write_seqlock(&vnode->cb_lock); - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { - set_bit(AFS_VNODE_DELETED, &vnode->flags); - __afs_break_callback(vnode, afs_cb_break_for_unlink); - } - write_sequnlock(&vnode->cb_lock); - ret = 0; - } else { - afs_break_callback(vnode, afs_cb_break_for_unlink); + if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { + /* Already done */ + } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { + write_seqlock(&vnode->cb_lock); + drop_nlink(&vnode->vfs_inode); + if (vnode->vfs_inode.i_nlink == 0) { + set_bit(AFS_VNODE_DELETED, &vnode->flags); + __afs_break_callback(vnode, afs_cb_break_for_unlink); + } + write_sequnlock(&vnode->cb_lock); + } else { + afs_break_callback(vnode, afs_cb_break_for_unlink); - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) - kdebug("AFS_VNODE_DELETED"); + if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) + _debug("AFS_VNODE_DELETED"); - ret = afs_validate(vnode, key); - if (ret == -ESTALE) - ret = 0; - } - _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret); + ret = afs_validate(vnode, op->key); + if (ret != -ESTALE) + op->error = ret; } - return ret; + _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error); +} + +static void afs_unlink_success(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + afs_check_for_remote_deletion(op, op->file[0].vnode); + afs_vnode_commit_status(op, 
&op->file[0]); + afs_vnode_commit_status(op, &op->file[1]); + afs_update_dentry_version(op, &op->file[0], op->dentry); + afs_dir_remove_link(op); +} + +static void afs_unlink_edit_dir(struct afs_operation *op) +{ + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode *dvnode = dvp->vnode; + + _enter("op=%08x", op->debug_id); + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) + afs_edit_dir_remove(dvnode, &op->dentry->d_name, + afs_edit_dir_for_unlink); + up_write(&dvnode->validate_lock); +} + +static void afs_unlink_put(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + if (op->unlink.need_rehash && op->error < 0 && op->error != -ENOENT) + d_rehash(op->dentry); } +static const struct afs_operation_ops afs_unlink_operation = { + .issue_afs_rpc = afs_fs_remove_file, + .issue_yfs_rpc = yfs_fs_remove_file, + .success = afs_unlink_success, + .edit_dir = afs_unlink_edit_dir, + .put = afs_unlink_put, +}; + /* * Remove a file or symlink from an AFS filesystem. */ static int afs_unlink(struct inode *dir, struct dentry *dentry) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - struct key *key; - bool need_rehash = false; int ret; _enter("{%llx:%llu},{%pd}", @@ -1492,269 +1531,176 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) if (dentry->d_name.len >= AFSNAMEMAX) return -ENAMETOOLONG; - ret = -ENOMEM; - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - goto error; + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; - } + afs_op_set_vnode(op, 0, dvnode); + op->file[0].dv_delta = 1; /* Try to make sure we have a callback promise on the victim. */ - ret = afs_validate(vnode, key); - if (ret < 0) - goto error_key; + ret = afs_validate(vnode, op->key); + if (ret < 0) { + op->error = ret; + goto error; + } spin_lock(&dentry->d_lock); if (d_count(dentry) > 1) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ write_inode_now(d_inode(dentry), 0); - ret = afs_sillyrename(dvnode, vnode, dentry, key); - goto error_key; + op->error = afs_sillyrename(dvnode, vnode, dentry, op->key); + goto error; } if (!d_unhashed(dentry)) { /* Prevent a race with RCU lookup. 
*/ __d_drop(dentry); - need_rehash = true; + op->unlink.need_rehash = true; } spin_unlock(&dentry->d_lock); - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t data_version = dvnode->status.data_version + 1; - afs_dataversion_t data_version_2 = vnode->status.data_version; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - fc.cb_break_2 = afs_calc_vnode_cb_break(vnode); - - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) && - !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) { - yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name, - &scb[0], &scb[1]); - if (fc.ac.error != -ECONNABORTED || - fc.ac.abort_code != RXGEN_OPCODE) - continue; - set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags); - } - - afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]); - } + op->file[1].vnode = vnode; + op->dentry = dentry; + op->ops = &afs_unlink_operation; + return afs_do_sync_operation(op); - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &data_version, &scb[0]); - afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, - &data_version_2, &scb[1]); - afs_update_dentry_version(&fc, dentry, &scb[0]); - ret = afs_end_vnode_operation(&fc); - if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) - ret = afs_dir_remove_link(dvnode, dentry, key); - - if (ret == 0) { - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == data_version) - afs_edit_dir_remove(dvnode, &dentry->d_name, - afs_edit_dir_for_unlink); - up_write(&dvnode->validate_lock); - } - } - - if (need_rehash && ret < 0 && ret != -ENOENT) - d_rehash(dentry); - -error_key: - key_put(key); -error_scb: - kfree(scb); error: - _leave(" = %d", ret); - return ret; + return afs_put_operation(op); } +static const struct afs_operation_ops afs_create_operation = { + .issue_afs_rpc = afs_fs_create_file, + .issue_yfs_rpc = yfs_fs_create_file, + .success = afs_create_success, + .edit_dir = afs_create_edit_dir, + .put = afs_create_put, +}; + /* * create a regular file on an AFS filesystem */ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { - struct afs_iget_data iget_data; - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); - struct key *key; - afs_dataversion_t data_version; - int ret; - - mode |= S_IFREG; + int ret = -ENAMETOOLONG; - _enter("{%llx:%llu},{%pd},%ho,", + _enter("{%llx:%llu},{%pd},%ho", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); - ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) { + ret = PTR_ERR(op); goto error; } - ret = -ENOMEM; - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - goto error_scb; + afs_op_set_vnode(op, 0, dvnode); + op->file[0].dv_delta = 1; - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - data_version = dvnode->status.data_version + 1; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_prep_for_new_inode(&fc, &iget_data); - afs_fs_create(&fc, dentry->d_name.name, mode, - &scb[0], &iget_data.fid, &scb[1]); - } + op->dentry = dentry; + op->create.mode = S_IFREG | mode; + op->create.reason = afs_edit_dir_for_create; + op->ops = 
&afs_create_operation; + return afs_do_sync_operation(op); - afs_check_for_remote_deletion(&fc, dvnode); - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &data_version, &scb[0]); - afs_update_dentry_version(&fc, dentry, &scb[0]); - afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); - ret = afs_end_vnode_operation(&fc); - if (ret < 0) - goto error_key; - } else { - goto error_key; - } - - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == data_version) - afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, - afs_edit_dir_for_create); - up_write(&dvnode->validate_lock); - - kfree(scb); - key_put(key); - _leave(" = 0"); - return 0; - -error_scb: - kfree(scb); -error_key: - key_put(key); error: d_drop(dentry); _leave(" = %d", ret); return ret; } +static void afs_link_success(struct afs_operation *op) +{ + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; + + _enter("op=%08x", op->debug_id); + afs_vnode_commit_status(op, dvp); + afs_vnode_commit_status(op, vp); + afs_update_dentry_version(op, dvp, op->dentry); + if (op->dentry_2->d_parent == op->dentry->d_parent) + afs_update_dentry_version(op, dvp, op->dentry_2); + ihold(&vp->vnode->vfs_inode); + d_instantiate(op->dentry, &vp->vnode->vfs_inode); +} + +static void afs_link_put(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + if (op->error) + d_drop(op->dentry); +} + +static const struct afs_operation_ops afs_link_operation = { + .issue_afs_rpc = afs_fs_link, + .issue_yfs_rpc = yfs_fs_link, + .success = afs_link_success, + .edit_dir = afs_create_edit_dir, + .put = afs_link_put, +}; + /* * create a hard link between files in an AFS filesystem */ static int afs_link(struct dentry *from, struct inode *dir, struct dentry *dentry) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_vnode *vnode = AFS_FS_I(d_inode(from)); - struct key *key; - afs_dataversion_t data_version; - int ret; + int ret = -ENAMETOOLONG; _enter("{%llx:%llu},{%llx:%llu},{%pd}", vnode->fid.vid, vnode->fid.vnode, dvnode->fid.vid, dvnode->fid.vnode, dentry); - ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; - ret = -ENOMEM; - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) { + ret = PTR_ERR(op); goto error; - - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; - } - - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - data_version = dvnode->status.data_version + 1; - - if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) { - afs_end_vnode_operation(&fc); - goto error_key; - } - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - fc.cb_break_2 = afs_calc_vnode_cb_break(vnode); - afs_fs_link(&fc, vnode, dentry->d_name.name, - &scb[0], &scb[1]); - } - - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &data_version, &scb[0]); - afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, - NULL, &scb[1]); - ihold(&vnode->vfs_inode); - afs_update_dentry_version(&fc, dentry, &scb[0]); - d_instantiate(dentry, &vnode->vfs_inode); - - mutex_unlock(&vnode->io_lock); - ret = afs_end_vnode_operation(&fc); - if (ret < 0) - goto error_key; - } else { - goto error_key; } - down_write(&dvnode->validate_lock); - if 
(test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == data_version) - afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid, - afs_edit_dir_for_link); - up_write(&dvnode->validate_lock); + afs_op_set_vnode(op, 0, dvnode); + afs_op_set_vnode(op, 1, vnode); + op->file[0].dv_delta = 1; - key_put(key); - kfree(scb); - _leave(" = 0"); - return 0; + op->dentry = dentry; + op->dentry_2 = from; + op->ops = &afs_link_operation; + op->create.reason = afs_edit_dir_for_link; + return afs_do_sync_operation(op); -error_key: - key_put(key); -error_scb: - kfree(scb); error: d_drop(dentry); _leave(" = %d", ret); return ret; } +static const struct afs_operation_ops afs_symlink_operation = { + .issue_afs_rpc = afs_fs_symlink, + .issue_yfs_rpc = yfs_fs_symlink, + .success = afs_create_success, + .edit_dir = afs_create_edit_dir, + .put = afs_create_put, +}; + /* * create a symlink in an AFS filesystem */ static int afs_symlink(struct inode *dir, struct dentry *dentry, const char *content) { - struct afs_iget_data iget_data; - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); - struct key *key; - afs_dataversion_t data_version; int ret; _enter("{%llx:%llu},{%pd},%s", @@ -1769,62 +1715,115 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, if (strlen(content) >= AFSPATHMAX) goto error; - ret = -ENOMEM; - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) { + ret = PTR_ERR(op); goto error; - - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; } - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - data_version = dvnode->status.data_version + 1; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_prep_for_new_inode(&fc, &iget_data); - afs_fs_symlink(&fc, dentry->d_name.name, content, - &scb[0], &iget_data.fid, &scb[1]); - } + afs_op_set_vnode(op, 0, dvnode); + op->file[0].dv_delta = 1; - afs_check_for_remote_deletion(&fc, dvnode); - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &data_version, &scb[0]); - afs_update_dentry_version(&fc, dentry, &scb[0]); - afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); - ret = afs_end_vnode_operation(&fc); - if (ret < 0) - goto error_key; - } else { - goto error_key; - } - - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == data_version) - afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, - afs_edit_dir_for_symlink); - up_write(&dvnode->validate_lock); + op->dentry = dentry; + op->ops = &afs_symlink_operation; + op->create.reason = afs_edit_dir_for_symlink; + op->create.symlink = content; + return afs_do_sync_operation(op); - key_put(key); - kfree(scb); - _leave(" = 0"); - return 0; - -error_key: - key_put(key); -error_scb: - kfree(scb); error: d_drop(dentry); _leave(" = %d", ret); return ret; } +static void afs_rename_success(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + + afs_vnode_commit_status(op, &op->file[0]); + if (op->file[1].vnode != op->file[0].vnode) + afs_vnode_commit_status(op, &op->file[1]); +} + +static void afs_rename_edit_dir(struct afs_operation *op) +{ + struct afs_vnode_param *orig_dvp = &op->file[0]; + struct afs_vnode_param *new_dvp = &op->file[1]; + struct afs_vnode *orig_dvnode = orig_dvp->vnode; + struct 
afs_vnode *new_dvnode = new_dvp->vnode; + struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry)); + struct dentry *old_dentry = op->dentry; + struct dentry *new_dentry = op->dentry_2; + struct inode *new_inode; + + _enter("op=%08x", op->debug_id); + + if (op->rename.rehash) { + d_rehash(op->rename.rehash); + op->rename.rehash = NULL; + } + + down_write(&orig_dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) && + orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta) + afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, + afs_edit_dir_for_rename_0); + + if (new_dvnode != orig_dvnode) { + up_write(&orig_dvnode->validate_lock); + down_write(&new_dvnode->validate_lock); + } + + if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) && + new_dvnode->status.data_version == new_dvp->dv_before + new_dvp->dv_delta) { + if (!op->rename.new_negative) + afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, + afs_edit_dir_for_rename_1); + + afs_edit_dir_add(new_dvnode, &new_dentry->d_name, + &vnode->fid, afs_edit_dir_for_rename_2); + } + + new_inode = d_inode(new_dentry); + if (new_inode) { + spin_lock(&new_inode->i_lock); + if (new_inode->i_nlink > 0) + drop_nlink(new_inode); + spin_unlock(&new_inode->i_lock); + } + + /* Now we can update d_fsdata on the dentries to reflect their + * new parent's data_version. + * + * Note that if we ever implement RENAME_EXCHANGE, we'll have + * to update both dentries with opposing dir versions. + */ + afs_update_dentry_version(op, new_dvp, op->dentry); + afs_update_dentry_version(op, new_dvp, op->dentry_2); + + d_move(old_dentry, new_dentry); + + up_write(&new_dvnode->validate_lock); +} + +static void afs_rename_put(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + if (op->rename.rehash) + d_rehash(op->rename.rehash); + dput(op->rename.tmp); + if (op->error) + d_rehash(op->dentry); +} + +static const struct afs_operation_ops afs_rename_operation = { + .issue_afs_rpc = afs_fs_rename, + .issue_yfs_rpc = yfs_fs_rename, + .success = afs_rename_success, + .edit_dir = afs_rename_edit_dir, + .put = afs_rename_put, +}; + /* * rename a file in an AFS filesystem and/or move it between directories */ @@ -1832,15 +1831,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *orig_dvnode, *new_dvnode, *vnode; - struct dentry *tmp = NULL, *rehash = NULL; - struct inode *new_inode; - struct key *key; - afs_dataversion_t orig_data_version; - afs_dataversion_t new_data_version; - bool new_negative = d_is_negative(new_dentry); int ret; if (flags) @@ -1860,16 +1852,19 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dvnode->fid.vid, new_dvnode->fid.vnode, new_dentry); - ret = -ENOMEM; - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - goto error; + op = afs_alloc_operation(NULL, orig_dvnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - key = afs_request_key(orig_dvnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; - } + afs_op_set_vnode(op, 0, orig_dvnode); + afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */ + op->file[0].dv_delta = 1; + op->file[1].dv_delta = 1; + + op->dentry = old_dentry; + op->dentry_2 = new_dentry; + op->rename.new_negative = d_is_negative(new_dentry); + op->ops = &afs_rename_operation; /* 
For non-directories, check whether the target is busy and if so, * make a copy of the dentry and then do a silly-rename. If the @@ -1882,26 +1877,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, */ if (!d_unhashed(new_dentry)) { d_drop(new_dentry); - rehash = new_dentry; + op->rename.rehash = new_dentry; } if (d_count(new_dentry) > 2) { /* copy the target dentry's name */ ret = -ENOMEM; - tmp = d_alloc(new_dentry->d_parent, - &new_dentry->d_name); - if (!tmp) - goto error_rehash; + op->rename.tmp = d_alloc(new_dentry->d_parent, + &new_dentry->d_name); + if (!op->rename.tmp) + goto error; ret = afs_sillyrename(new_dvnode, AFS_FS_I(d_inode(new_dentry)), - new_dentry, key); + new_dentry, op->key); if (ret) - goto error_rehash; + goto error; - new_dentry = tmp; - rehash = NULL; - new_negative = true; + op->dentry_2 = op->rename.tmp; + op->rename.rehash = NULL; + op->rename.new_negative = true; } } @@ -1916,98 +1911,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, */ d_drop(old_dentry); - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { - orig_data_version = orig_dvnode->status.data_version + 1; - - if (orig_dvnode != new_dvnode) { - if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) { - afs_end_vnode_operation(&fc); - goto error_rehash_old; - } - new_data_version = new_dvnode->status.data_version + 1; - } else { - new_data_version = orig_data_version; - } - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode); - fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode); - afs_fs_rename(&fc, old_dentry->d_name.name, - new_dvnode, new_dentry->d_name.name, - &scb[0], &scb[1]); - } + return afs_do_sync_operation(op); - afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break, - &orig_data_version, &scb[0]); - if (new_dvnode != orig_dvnode) { - afs_vnode_commit_status(&fc, new_dvnode, fc.cb_break_2, - &new_data_version, &scb[1]); - mutex_unlock(&new_dvnode->io_lock); - } - ret = afs_end_vnode_operation(&fc); - if (ret < 0) - goto error_rehash_old; - } - - if (ret == 0) { - if (rehash) - d_rehash(rehash); - down_write(&orig_dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) && - orig_dvnode->status.data_version == orig_data_version) - afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, - afs_edit_dir_for_rename_0); - if (orig_dvnode != new_dvnode) { - up_write(&orig_dvnode->validate_lock); - - down_write(&new_dvnode->validate_lock); - } - if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) && - orig_dvnode->status.data_version == new_data_version) { - if (!new_negative) - afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, - afs_edit_dir_for_rename_1); - - afs_edit_dir_add(new_dvnode, &new_dentry->d_name, - &vnode->fid, afs_edit_dir_for_rename_2); - } - - new_inode = d_inode(new_dentry); - if (new_inode) { - spin_lock(&new_inode->i_lock); - if (new_inode->i_nlink > 0) - drop_nlink(new_inode); - spin_unlock(&new_inode->i_lock); - } - - /* Now we can update d_fsdata on the dentries to reflect their - * new parent's data_version. - * - * Note that if we ever implement RENAME_EXCHANGE, we'll have - * to update both dentries with opposing dir versions. 
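An illustrative aside, not part of the patch: afs_update_dentry_version(), as called above, amounts to caching the parent directory's post-operation data version in the dentry so that ->d_revalidate can later detect a directory that changed behind the dentry's back. A minimal sketch under that assumption (the helper name is hypothetical):

static void afs_example_note_dir_version(struct dentry *dentry,
					 struct afs_vnode_param *dir_vp)
{
	/* Record which version of the parent directory this dentry
	 * reflects; only done once the operation has succeeded.  The value
	 * is compared against the directory's current data version when
	 * the dentry is next revalidated. */
	dentry->d_fsdata =
		(void *)(unsigned long)dir_vp->scb.status.data_version;
}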
- */ - afs_update_dentry_version(&fc, old_dentry, &scb[1]); - afs_update_dentry_version(&fc, new_dentry, &scb[1]); - d_move(old_dentry, new_dentry); - up_write(&new_dvnode->validate_lock); - goto error_tmp; - } - -error_rehash_old: - d_rehash(new_dentry); -error_rehash: - if (rehash) - d_rehash(rehash); -error_tmp: - if (tmp) - dput(tmp); - key_put(key); -error_scb: - kfree(scb); error: - _leave(" = %d", ret); - return ret; + return afs_put_operation(op); } /* diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c index d94e2b7cddff..b14e3d9a25e2 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -12,6 +12,47 @@ #include <linux/fsnotify.h> #include "internal.h" +static void afs_silly_rename_success(struct afs_operation *op) +{ + _enter("op=%08x", op->debug_id); + + afs_vnode_commit_status(op, &op->file[0]); +} + +static void afs_silly_rename_edit_dir(struct afs_operation *op) +{ + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode *dvnode = dvp->vnode; + struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry)); + struct dentry *old = op->dentry; + struct dentry *new = op->dentry_2; + + spin_lock(&old->d_lock); + old->d_flags |= DCACHE_NFSFS_RENAMED; + spin_unlock(&old->d_lock); + if (dvnode->silly_key != op->key) { + key_put(dvnode->silly_key); + dvnode->silly_key = key_get(op->key); + } + + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) { + afs_edit_dir_remove(dvnode, &old->d_name, + afs_edit_dir_for_silly_0); + afs_edit_dir_add(dvnode, &new->d_name, + &vnode->fid, afs_edit_dir_for_silly_1); + } + up_write(&dvnode->validate_lock); +} + +static const struct afs_operation_ops afs_silly_rename_operation = { + .issue_afs_rpc = afs_fs_rename, + .issue_yfs_rpc = yfs_fs_rename, + .success = afs_silly_rename_success, + .edit_dir = afs_silly_rename_edit_dir, +}; + /* * Actually perform the silly rename step. 
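The edit_dir handlers above, and the silly-rename/unlink ones that follow, all gate their local directory-blob edits on the same test; a sketch of that test in isolation (helper name hypothetical), assuming the caller holds the directory's validate_lock:

static bool afs_example_dir_edit_ok(struct afs_vnode_param *dvp)
{
	struct afs_vnode *dvnode = dvp->vnode;

	/* dv_before was sampled when the operation began and dv_delta is
	 * the number of version bumps the RPC was expected to cause.  The
	 * cached directory image may only be edited in place if it is
	 * still marked valid and the committed post-op version is exactly
	 * the one predicted; otherwise it has to be refetched. */
	return test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
	       dvnode->status.data_version == dvp->dv_before + dvp->dv_delta;
}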
*/ @@ -19,56 +60,22 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode struct dentry *old, struct dentry *new, struct key *key) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; - afs_dataversion_t dir_data_version; - int ret = -ERESTARTSYS; + struct afs_operation *op; _enter("%pd,%pd", old, new); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; + op = afs_alloc_operation(key, dvnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - trace_afs_silly_rename(vnode, false); - if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - dir_data_version = dvnode->status.data_version + 1; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_fs_rename(&fc, old->d_name.name, - dvnode, new->d_name.name, - scb, scb); - } - - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &dir_data_version, scb); - ret = afs_end_vnode_operation(&fc); - } + afs_op_set_vnode(op, 0, dvnode); - if (ret == 0) { - spin_lock(&old->d_lock); - old->d_flags |= DCACHE_NFSFS_RENAMED; - spin_unlock(&old->d_lock); - if (dvnode->silly_key != key) { - key_put(dvnode->silly_key); - dvnode->silly_key = key_get(key); - } - - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == dir_data_version) { - afs_edit_dir_remove(dvnode, &old->d_name, - afs_edit_dir_for_silly_0); - afs_edit_dir_add(dvnode, &new->d_name, - &vnode->fid, afs_edit_dir_for_silly_1); - } - up_write(&dvnode->validate_lock); - } + op->dentry = old; + op->dentry_2 = new; + op->ops = &afs_silly_rename_operation; - kfree(scb); - _leave(" = %d", ret); - return ret; + trace_afs_silly_rename(vnode, false); + return afs_do_sync_operation(op); } /** @@ -139,65 +146,66 @@ out: return ret; } +static void afs_silly_unlink_success(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[1].vnode; + + _enter("op=%08x", op->debug_id); + afs_check_for_remote_deletion(op, op->file[0].vnode); + afs_vnode_commit_status(op, &op->file[0]); + afs_vnode_commit_status(op, &op->file[1]); + afs_update_dentry_version(op, &op->file[0], op->dentry); + + drop_nlink(&vnode->vfs_inode); + if (vnode->vfs_inode.i_nlink == 0) { + set_bit(AFS_VNODE_DELETED, &vnode->flags); + clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + } +} + +static void afs_silly_unlink_edit_dir(struct afs_operation *op) +{ + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode *dvnode = dvp->vnode; + + _enter("op=%08x", op->debug_id); + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) + afs_edit_dir_remove(dvnode, &op->dentry->d_name, + afs_edit_dir_for_unlink); + up_write(&dvnode->validate_lock); +} + +static const struct afs_operation_ops afs_silly_unlink_operation = { + .issue_afs_rpc = afs_fs_remove_file, + .issue_yfs_rpc = yfs_fs_remove_file, + .success = afs_silly_unlink_success, + .edit_dir = afs_silly_unlink_edit_dir, +}; + /* * Tell the server to remove a sillyrename file. 
*/ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode, struct dentry *dentry, struct key *key) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; - int ret = -ERESTARTSYS; + struct afs_operation *op; _enter(""); - scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; + op = afs_alloc_operation(NULL, dvnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - trace_afs_silly_rename(vnode, true); - if (afs_begin_vnode_operation(&fc, dvnode, key, false)) { - afs_dataversion_t dir_data_version = dvnode->status.data_version + 1; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(dvnode); - - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) && - !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) { - yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name, - &scb[0], &scb[1]); - if (fc.ac.error != -ECONNABORTED || - fc.ac.abort_code != RXGEN_OPCODE) - continue; - set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags); - } - - afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]); - } + afs_op_set_vnode(op, 0, dvnode); + afs_op_set_vnode(op, 1, vnode); - afs_vnode_commit_status(&fc, dvnode, fc.cb_break, - &dir_data_version, &scb[0]); - ret = afs_end_vnode_operation(&fc); - if (ret == 0) { - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { - set_bit(AFS_VNODE_DELETED, &vnode->flags); - clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); - } - } - if (ret == 0) { - down_write(&dvnode->validate_lock); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && - dvnode->status.data_version == dir_data_version) - afs_edit_dir_remove(dvnode, &dentry->d_name, - afs_edit_dir_for_unlink); - up_write(&dvnode->validate_lock); - } - } + op->dentry = dentry; + op->ops = &afs_silly_unlink_operation; - kfree(scb); - _leave(" = %d", ret); - return ret; + trace_afs_silly_rename(vnode, true); + return afs_do_sync_operation(op); } /* diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 7503899c0a1b..b79879aacc02 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -10,6 +10,99 @@ #include <linux/dns_resolver.h> #include "internal.h" +static atomic_t afs_autocell_ino; + +/* + * iget5() comparator for inode created by autocell operations + * + * These pseudo inodes don't match anything. + */ +static int afs_iget5_pseudo_test(struct inode *inode, void *opaque) +{ + return 0; +} + +/* + * iget5() inode initialiser + */ +static int afs_iget5_pseudo_set(struct inode *inode, void *opaque) +{ + struct afs_super_info *as = AFS_FS_S(inode->i_sb); + struct afs_vnode *vnode = AFS_FS_I(inode); + struct afs_fid *fid = opaque; + + vnode->volume = as->volume; + vnode->fid = *fid; + inode->i_ino = fid->vnode; + inode->i_generation = fid->unique; + return 0; +} + +/* + * Create an inode for a dynamic root directory or an autocell dynamic + * automount dir. 
+ */ +struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) +{ + struct afs_super_info *as = AFS_FS_S(sb); + struct afs_vnode *vnode; + struct inode *inode; + struct afs_fid fid = {}; + + _enter(""); + + if (as->volume) + fid.vid = as->volume->vid; + if (root) { + fid.vnode = 1; + fid.unique = 1; + } else { + fid.vnode = atomic_inc_return(&afs_autocell_ino); + fid.unique = 0; + } + + inode = iget5_locked(sb, fid.vnode, + afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid); + if (!inode) { + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } + + _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }", + inode, inode->i_ino, fid.vid, fid.vnode, fid.unique); + + vnode = AFS_FS_I(inode); + + /* there shouldn't be an existing inode */ + BUG_ON(!(inode->i_state & I_NEW)); + + inode->i_size = 0; + inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; + if (root) { + inode->i_op = &afs_dynroot_inode_operations; + inode->i_fop = &simple_dir_operations; + } else { + inode->i_op = &afs_autocell_inode_operations; + } + set_nlink(inode, 2); + inode->i_uid = GLOBAL_ROOT_UID; + inode->i_gid = GLOBAL_ROOT_GID; + inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode); + inode->i_blocks = 0; + inode->i_generation = 0; + + set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags); + if (!root) { + set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); + inode->i_flags |= S_AUTOMOUNT; + } + + inode->i_flags |= S_NOATIME; + unlock_new_inode(inode); + _leave(" = %p", inode); + return inode; +} + /* * Probe to see if a cell may exist. This prevents positive dentries from * being created unnecessarily. diff --git a/fs/afs/file.c b/fs/afs/file.c index 8415733f7bc1..506c47471b42 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -69,7 +69,7 @@ static const struct vm_operations_struct afs_vm_ops = { */ void afs_put_wb_key(struct afs_wb_key *wbk) { - if (refcount_dec_and_test(&wbk->usage)) { + if (wbk && refcount_dec_and_test(&wbk->usage)) { key_put(wbk->key); kfree(wbk); } @@ -220,14 +220,35 @@ static void afs_file_readpage_read_complete(struct page *page, } #endif +static void afs_fetch_data_success(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; + + _enter("op=%08x", op->debug_id); + afs_check_for_remote_deletion(op, vnode); + afs_vnode_commit_status(op, &op->file[0]); + afs_stat_v(vnode, n_fetches); + atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes); +} + +static void afs_fetch_data_put(struct afs_operation *op) +{ + afs_put_read(op->fetch.req); +} + +static const struct afs_operation_ops afs_fetch_data_operation = { + .issue_afs_rpc = afs_fs_fetch_data, + .issue_yfs_rpc = yfs_fs_fetch_data, + .success = afs_fetch_data_success, + .put = afs_fetch_data_put, +}; + /* * Fetch file data from the volume. 
*/ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; - int ret; + struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x,,,", vnode->volume->name, @@ -236,34 +257,15 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *re vnode->fid.unique, key_serial(key)); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; - - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - afs_dataversion_t data_version = vnode->status.data_version; + op = afs_alloc_operation(key, vnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_fetch_data(&fc, scb, req); - } - - afs_check_for_remote_deletion(&fc, vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - ret = afs_end_vnode_operation(&fc); - } + afs_op_set_vnode(op, 0, vnode); - if (ret == 0) { - afs_stat_v(vnode, n_fetches); - atomic_long_add(req->actual_len, - &afs_v2net(vnode)->n_fetch_bytes); - } - - kfree(scb); - _leave(" = %d", ret); - return ret; + op->fetch.req = afs_get_read(req); + op->ops = &afs_fetch_data_operation; + return afs_do_sync_operation(op); } /* diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 0f2a94ba73cb..70e518f7bc19 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -70,7 +70,8 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode) */ void afs_lock_op_done(struct afs_call *call) { - struct afs_vnode *vnode = call->lvnode; + struct afs_operation *op = call->op; + struct afs_vnode *vnode = op->lock.lvnode; if (call->error == 0) { spin_lock(&vnode->lock); @@ -172,15 +173,28 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode) vnode->lock_key = NULL; } +static void afs_lock_success(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; + + _enter("op=%08x", op->debug_id); + afs_check_for_remote_deletion(op, vnode); + afs_vnode_commit_status(op, &op->file[0]); +} + +static const struct afs_operation_ops afs_set_lock_operation = { + .issue_afs_rpc = afs_fs_set_lock, + .issue_yfs_rpc = yfs_fs_set_lock, + .success = afs_lock_success, +}; + /* * Get a lock on a file */ static int afs_set_lock(struct afs_vnode *vnode, struct key *key, afs_lock_type_t type) { - struct afs_status_cb *scb; - struct afs_fs_cursor fc; - int ret; + struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x,%u", vnode->volume->name, @@ -189,35 +203,29 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key, vnode->fid.unique, key_serial(key), type); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; + op = afs_alloc_operation(key, vnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_set_lock(&fc, type, scb); - } - - afs_check_for_remote_deletion(&fc, vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb); - ret = afs_end_vnode_operation(&fc); - } + afs_op_set_vnode(op, 0, vnode); - kfree(scb); - _leave(" = %d", ret); - return ret; + op->lock.type = type; + op->ops = &afs_set_lock_operation; + return afs_do_sync_operation(op); } +static const struct afs_operation_ops afs_extend_lock_operation = { + .issue_afs_rpc = afs_fs_extend_lock, + .issue_yfs_rpc = yfs_fs_extend_lock, + .success = 
afs_lock_success, +}; + /* * Extend a lock on a file */ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) { - struct afs_status_cb *scb; - struct afs_fs_cursor fc; - int ret; + struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x", vnode->volume->name, @@ -226,35 +234,29 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) vnode->fid.unique, key_serial(key)); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; - - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, false)) { - while (afs_select_current_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_extend_lock(&fc, scb); - } + op = afs_alloc_operation(key, vnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - afs_check_for_remote_deletion(&fc, vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb); - ret = afs_end_vnode_operation(&fc); - } + afs_op_set_vnode(op, 0, vnode); - kfree(scb); - _leave(" = %d", ret); - return ret; + op->flags |= AFS_OPERATION_UNINTR; + op->ops = &afs_extend_lock_operation; + return afs_do_sync_operation(op); } +static const struct afs_operation_ops afs_release_lock_operation = { + .issue_afs_rpc = afs_fs_release_lock, + .issue_yfs_rpc = yfs_fs_release_lock, + .success = afs_lock_success, +}; + /* * Release a lock on a file */ static int afs_release_lock(struct afs_vnode *vnode, struct key *key) { - struct afs_status_cb *scb; - struct afs_fs_cursor fc; - int ret; + struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x", vnode->volume->name, @@ -263,25 +265,15 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key) vnode->fid.unique, key_serial(key)); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; + op = afs_alloc_operation(key, vnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, false)) { - while (afs_select_current_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_release_lock(&fc, scb); - } + afs_op_set_vnode(op, 0, vnode); - afs_check_for_remote_deletion(&fc, vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb); - ret = afs_end_vnode_operation(&fc); - } - - kfree(scb); - _leave(" = %d", ret); - return ret; + op->flags |= AFS_OPERATION_UNINTR; + op->ops = &afs_release_lock_operation; + return afs_do_sync_operation(op); } /* diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c new file mode 100644 index 000000000000..2d2dff5688a4 --- /dev/null +++ b/fs/afs/fs_operation.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Fileserver-directed operation handling. + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include "internal.h" + +static atomic_t afs_operation_debug_counter; + +/* + * Create an operation against a volume. 
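The flock and fetch-data conversions above all follow the calling convention that the new fs_operation.c API below establishes. A minimal sketch of that convention, assuming the FetchStatus issuers keep the names used elsewhere in this patch (afs_fs_fetch_status / yfs_fs_fetch_status); the afs_example_* names are hypothetical:

static void afs_example_success(struct afs_operation *op)
{
	afs_check_for_remote_deletion(op, op->file[0].vnode);
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_example_operation = {
	.issue_afs_rpc	= afs_fs_fetch_status,
	.issue_yfs_rpc	= yfs_fs_fetch_status,
	.success	= afs_example_success,
};

static int afs_example_fetch_status(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);		/* file[0] is the primary vnode */
	op->ops = &afs_example_operation;

	/* Lock the vnode(s), rotate through the servers, issue the RPC,
	 * run ->success and ->edit_dir on success, then drop everything
	 * and return op->error. */
	return afs_do_sync_operation(op);
}

Passing a NULL key instead makes afs_alloc_operation() request one from the cell, as the directory conversions above do.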
+ */ +struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume) +{ + struct afs_operation *op; + + _enter(""); + + op = kzalloc(sizeof(*op), GFP_KERNEL); + if (!op) + return ERR_PTR(-ENOMEM); + + if (!key) { + key = afs_request_key(volume->cell); + if (IS_ERR(key)) { + kfree(op); + return ERR_CAST(key); + } + } else { + key_get(key); + } + + op->key = key; + op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op); + op->net = volume->cell->net; + op->cb_v_break = volume->cb_v_break; + op->debug_id = atomic_inc_return(&afs_operation_debug_counter); + op->error = -EDESTADDRREQ; + op->ac.error = SHRT_MAX; + + _leave(" = [op=%08x]", op->debug_id); + return op; +} + +/* + * Lock the vnode(s) being operated upon. + */ +static bool afs_get_io_locks(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; + struct afs_vnode *vnode2 = op->file[1].vnode; + + _enter(""); + + if (op->flags & AFS_OPERATION_UNINTR) { + mutex_lock(&vnode->io_lock); + op->flags |= AFS_OPERATION_LOCK_0; + _leave(" = t [1]"); + return true; + } + + if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2) + vnode2 = NULL; + + if (vnode2 > vnode) + swap(vnode, vnode2); + + if (mutex_lock_interruptible(&vnode->io_lock) < 0) { + op->error = -EINTR; + op->flags |= AFS_OPERATION_STOP; + _leave(" = f [I 0]"); + return false; + } + op->flags |= AFS_OPERATION_LOCK_0; + + if (vnode2) { + if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) { + op->error = -EINTR; + op->flags |= AFS_OPERATION_STOP; + mutex_unlock(&vnode->io_lock); + op->flags &= ~AFS_OPERATION_LOCK_0; + _leave(" = f [I 1]"); + return false; + } + op->flags |= AFS_OPERATION_LOCK_1; + } + + _leave(" = t [2]"); + return true; +} + +static void afs_drop_io_locks(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; + struct afs_vnode *vnode2 = op->file[1].vnode; + + _enter(""); + + if (op->flags & AFS_OPERATION_LOCK_1) + mutex_unlock(&vnode2->io_lock); + if (op->flags & AFS_OPERATION_LOCK_0) + mutex_unlock(&vnode->io_lock); +} + +static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp, + unsigned int index) +{ + struct afs_vnode *vnode = vp->vnode; + + if (vnode) { + vp->fid = vnode->fid; + vp->dv_before = vnode->status.data_version; + vp->cb_break_before = afs_calc_vnode_cb_break(vnode); + if (vnode->lock_state != AFS_VNODE_LOCK_NONE) + op->flags |= AFS_OPERATION_CUR_ONLY; + } + + if (vp->fid.vnode) + _debug("PREP[%u] {%llx:%llu.%u}", + index, vp->fid.vid, vp->fid.vnode, vp->fid.unique); +} + +/* + * Begin an operation on the fileserver. + * + * Fileserver operations are serialised on the server by vnode, so we serialise + * them here also using the io_lock. + */ +bool afs_begin_vnode_operation(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; + + ASSERT(vnode); + + _enter(""); + + if (op->file[0].need_io_lock) + if (!afs_get_io_locks(op)) + return false; + + afs_prepare_vnode(op, &op->file[0], 0); + afs_prepare_vnode(op, &op->file[1], 1); + op->cb_v_break = op->volume->cb_v_break; + _leave(" = true"); + return true; +} + +/* + * Tidy up a filesystem cursor and unlock the vnode. 
+ */ +static void afs_end_vnode_operation(struct afs_operation *op) +{ + _enter(""); + + if (op->error == -EDESTADDRREQ || + op->error == -EADDRNOTAVAIL || + op->error == -ENETUNREACH || + op->error == -EHOSTUNREACH) + afs_dump_edestaddrreq(op); + + afs_drop_io_locks(op); + + if (op->error == -ECONNABORTED) + op->error = afs_abort_to_error(op->ac.abort_code); +} + +/* + * Wait for an in-progress operation to complete. + */ +void afs_wait_for_operation(struct afs_operation *op) +{ + _enter(""); + + while (afs_select_fileserver(op)) { + op->cb_s_break = op->server->cb_s_break; + if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) && + op->ops->issue_yfs_rpc) + op->ops->issue_yfs_rpc(op); + else + op->ops->issue_afs_rpc(op); + + op->error = afs_wait_for_call_to_complete(op->call, &op->ac); + } + + if (op->error == 0) { + _debug("success"); + op->ops->success(op); + } + + afs_end_vnode_operation(op); + + if (op->error == 0 && op->ops->edit_dir) { + _debug("edit_dir"); + op->ops->edit_dir(op); + } + _leave(""); +} + +/* + * Dispose of an operation. + */ +int afs_put_operation(struct afs_operation *op) +{ + int i, ret = op->error; + + _enter("op=%08x,%d", op->debug_id, ret); + + if (op->ops && op->ops->put) + op->ops->put(op); + if (op->file[0].put_vnode) + iput(&op->file[0].vnode->vfs_inode); + if (op->file[1].put_vnode) + iput(&op->file[1].vnode->vfs_inode); + + if (op->more_files) { + for (i = 0; i < op->nr_files - 2; i++) + if (op->more_files[i].put_vnode) + iput(&op->more_files[i].vnode->vfs_inode); + kfree(op->more_files); + } + + afs_end_cursor(&op->ac); + afs_put_serverlist(op->net, op->server_list); + afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op); + kfree(op); + return ret; +} + +int afs_do_sync_operation(struct afs_operation *op) +{ + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + return afs_put_operation(op); +} diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index 37d1bba57b00..b34f74b0f319 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* AFS fileserver probing * - * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2018, 2020 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ @@ -11,15 +11,86 @@ #include "internal.h" #include "protocol_yfs.h" -static bool afs_fs_probe_done(struct afs_server *server) +static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ; +static unsigned int afs_fs_probe_slow_poll_interval = 5 * 60 * HZ; + +/* + * Start the probe polling timer. We have to supply it with an inc on the + * outstanding server count. + */ +static void afs_schedule_fs_probe(struct afs_net *net, + struct afs_server *server, bool fast) +{ + unsigned long atj; + + if (!net->live) + return; + + atj = server->probed_at; + atj += fast ? afs_fs_probe_fast_poll_interval : afs_fs_probe_slow_poll_interval; + + afs_inc_servers_outstanding(net); + if (timer_reduce(&net->fs_probe_timer, atj)) + afs_dec_servers_outstanding(net); +} + +/* + * Handle the completion of a set of probes. 
+ */ +static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server) +{ + bool responded = server->probe.responded; + + write_seqlock(&net->fs_lock); + if (responded) { + list_add_tail(&server->probe_link, &net->fs_probe_slow); + } else { + server->rtt = UINT_MAX; + clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags); + list_add_tail(&server->probe_link, &net->fs_probe_fast); + } + write_sequnlock(&net->fs_lock); + + afs_schedule_fs_probe(net, server, !responded); +} + +/* + * Handle the completion of a probe. + */ +static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server) +{ + _enter(""); + + if (atomic_dec_and_test(&server->probe_outstanding)) + afs_finished_fs_probe(net, server); + + wake_up_all(&server->probe_wq); +} + +/* + * Handle inability to send a probe due to ENOMEM when trying to allocate a + * call struct. + */ +static void afs_fs_probe_not_done(struct afs_net *net, + struct afs_server *server, + struct afs_addr_cursor *ac) { - if (!atomic_dec_and_test(&server->probe_outstanding)) - return false; + struct afs_addr_list *alist = ac->alist; + unsigned int index = ac->index; + + _enter(""); + + trace_afs_io_error(0, -ENOMEM, afs_io_error_fs_probe_fail); + spin_lock(&server->probe_lock); - wake_up_var(&server->probe_outstanding); - clear_bit_unlock(AFS_SERVER_FL_PROBING, &server->flags); - wake_up_bit(&server->flags, AFS_SERVER_FL_PROBING); - return true; + server->probe.local_failure = true; + if (server->probe.error == 0) + server->probe.error = -ENOMEM; + + set_bit(index, &alist->failed); + + spin_unlock(&server->probe_lock); + return afs_done_one_fs_probe(net, server); } /* @@ -30,10 +101,8 @@ void afs_fileserver_probe_result(struct afs_call *call) { struct afs_addr_list *alist = call->alist; struct afs_server *server = call->server; - unsigned int server_index = call->server_index; unsigned int index = call->addr_ix; unsigned int rtt_us = 0; - bool have_result = false; int ret = call->error; _enter("%pU,%u", &server->uuid, index); @@ -52,8 +121,9 @@ void afs_fileserver_probe_result(struct afs_call *call) goto responded; case -ENOMEM: case -ENONET: + clear_bit(index, &alist->responded); server->probe.local_failure = true; - afs_io_error(call, afs_io_error_fs_probe_fail); + trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail); goto out; case -ECONNRESET: /* Responded, but call expired. */ case -ERFKILL: @@ -72,12 +142,11 @@ void afs_fileserver_probe_result(struct afs_call *call) server->probe.error == -ETIMEDOUT || server->probe.error == -ETIME)) server->probe.error = ret; - afs_io_error(call, afs_io_error_fs_probe_fail); + trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail); goto out; } responded: - set_bit(index, &alist->responded); clear_bit(index, &alist->failed); if (call->service_id == YFS_FS_SERVICE) { @@ -95,39 +164,34 @@ responded: rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); if (rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; + server->rtt = rtt_us; alist->preferred = index; - have_result = true; } smp_wmb(); /* Set rtt before responded. 
*/ server->probe.responded = true; - set_bit(AFS_SERVER_FL_PROBED, &server->flags); + set_bit(index, &alist->responded); + set_bit(AFS_SERVER_FL_RESPONDING, &server->flags); out: spin_unlock(&server->probe_lock); - _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", - server_index, index, &alist->addrs[index].transport, rtt_us, ret); + _debug("probe %pU [%u] %pISpc rtt=%u ret=%d", + &server->uuid, index, &alist->addrs[index].transport, + rtt_us, ret); - have_result |= afs_fs_probe_done(server); - if (have_result) - wake_up_all(&server->probe_wq); + return afs_done_one_fs_probe(call->net, server); } /* - * Probe all of a fileserver's addresses to find out the best route and to - * query its capabilities. + * Probe one or all of a fileserver's addresses to find out the best route and + * to query its capabilities. */ -static int afs_do_probe_fileserver(struct afs_net *net, - struct afs_server *server, - struct key *key, - unsigned int server_index, - struct afs_error *_e) +void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server, + struct key *key, bool all) { struct afs_addr_cursor ac = { .index = 0, }; - struct afs_call *call; - bool in_progress = false; _enter("%pU", &server->uuid); @@ -137,50 +201,25 @@ static int afs_do_probe_fileserver(struct afs_net *net, afs_get_addrlist(ac.alist); read_unlock(&server->fs_lock); - atomic_set(&server->probe_outstanding, ac.alist->nr_addrs); + server->probed_at = jiffies; + atomic_set(&server->probe_outstanding, all ? ac.alist->nr_addrs : 1); memset(&server->probe, 0, sizeof(server->probe)); server->probe.rtt = UINT_MAX; - for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { - call = afs_fs_get_capabilities(net, server, &ac, key, server_index); - if (!IS_ERR(call)) { - afs_put_call(call); - in_progress = true; - } else { - afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code); - } - } - - if (!in_progress) - afs_fs_probe_done(server); - afs_put_addrlist(ac.alist); - return in_progress; -} + ac.index = ac.alist->preferred; + if (ac.index < 0 || ac.index >= ac.alist->nr_addrs) + all = true; -/* - * Send off probes to all unprobed servers. - */ -int afs_probe_fileservers(struct afs_net *net, struct key *key, - struct afs_server_list *list) -{ - struct afs_server *server; - struct afs_error e; - bool in_progress = false; - int i; - - e.error = 0; - e.responded = false; - for (i = 0; i < list->nr_servers; i++) { - server = list->servers[i].server; - if (test_bit(AFS_SERVER_FL_PROBED, &server->flags)) - continue; - - if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) && - afs_do_probe_fileserver(net, server, key, i, &e)) - in_progress = true; + if (all) { + for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) + if (!afs_fs_get_capabilities(net, server, &ac, key)) + afs_fs_probe_not_done(net, server, &ac); + } else { + if (!afs_fs_get_capabilities(net, server, &ac, key)) + afs_fs_probe_not_done(net, server, &ac); } - return in_progress ? 
0 : e.error; + afs_put_addrlist(ac.alist); } /* @@ -190,7 +229,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried) { struct wait_queue_entry *waits; struct afs_server *server; - unsigned int rtt = UINT_MAX; + unsigned int rtt = UINT_MAX, rtt_s; bool have_responders = false; int pref = -1, i; @@ -200,7 +239,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried) for (i = 0; i < slist->nr_servers; i++) { if (test_bit(i, &untried)) { server = slist->servers[i].server; - if (!test_bit(AFS_SERVER_FL_PROBING, &server->flags)) + if (!atomic_read(&server->probe_outstanding)) __clear_bit(i, &untried); if (server->probe.responded) have_responders = true; @@ -230,7 +269,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried) server = slist->servers[i].server; if (server->probe.responded) goto stop; - if (test_bit(AFS_SERVER_FL_PROBING, &server->flags)) + if (atomic_read(&server->probe_outstanding)) still_probing = true; } } @@ -246,10 +285,11 @@ stop: for (i = 0; i < slist->nr_servers; i++) { if (test_bit(i, &untried)) { server = slist->servers[i].server; - if (server->probe.responded && - server->probe.rtt < rtt) { + rtt_s = READ_ONCE(server->rtt); + if (test_bit(AFS_SERVER_FL_RESPONDING, &server->flags) && + rtt_s < rtt) { pref = i; - rtt = server->probe.rtt; + rtt = rtt_s; } remove_wait_queue(&server->probe_wq, &waits[i]); @@ -265,3 +305,156 @@ stop: slist->preferred = pref; return 0; } + +/* + * Probe timer. We have an increment on fs_outstanding that we need to pass + * along to the work item. + */ +void afs_fs_probe_timer(struct timer_list *timer) +{ + struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer); + + if (!queue_work(afs_wq, &net->fs_prober)) + afs_dec_servers_outstanding(net); +} + +/* + * Dispatch a probe to a server. + */ +static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server, bool all) + __releases(&net->fs_lock) +{ + struct key *key = NULL; + + /* We remove it from the queues here - it will be added back to + * one of the queues on the completion of the probe. + */ + list_del_init(&server->probe_link); + + afs_get_server(server, afs_server_trace_get_probe); + write_sequnlock(&net->fs_lock); + + afs_fs_probe_fileserver(net, server, key, all); + afs_put_server(net, server, afs_server_trace_put_probe); +} + +/* + * Probe a server immediately without waiting for its due time to come + * round. This is used when all of the addresses have been tried. + */ +void afs_probe_fileserver(struct afs_net *net, struct afs_server *server) +{ + write_seqlock(&net->fs_lock); + if (!list_empty(&server->probe_link)) + return afs_dispatch_fs_probe(net, server, true); + write_sequnlock(&net->fs_lock); +} + +/* + * Probe dispatcher to regularly dispatch probes to keep NAT alive. 
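A sketch of the decision the dispatcher below makes for the server at the head of each queue (the helper name is hypothetical): dispatch it if its poll interval has elapsed since it was last probed, otherwise note when the probe timer should next fire.

static bool afs_example_probe_due(struct afs_server *server,
				  unsigned long interval, unsigned long now,
				  unsigned long *_timer_at)
{
	unsigned long poll_at = server->probed_at + interval;

	if (time_before(now, poll_at)) {
		*_timer_at = poll_at;	/* not due yet; rearm the timer */
		return false;
	}
	return true;			/* due; probe it now */
}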
+ */ +void afs_fs_probe_dispatcher(struct work_struct *work) +{ + struct afs_net *net = container_of(work, struct afs_net, fs_prober); + struct afs_server *fast, *slow, *server; + unsigned long nowj, timer_at, poll_at; + bool first_pass = true, set_timer = false; + + if (!net->live) + return; + + _enter(""); + + if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) { + _leave(" [none]"); + return; + } + +again: + write_seqlock(&net->fs_lock); + + fast = slow = server = NULL; + nowj = jiffies; + timer_at = nowj + MAX_JIFFY_OFFSET; + + if (!list_empty(&net->fs_probe_fast)) { + fast = list_first_entry(&net->fs_probe_fast, struct afs_server, probe_link); + poll_at = fast->probed_at + afs_fs_probe_fast_poll_interval; + if (time_before(nowj, poll_at)) { + timer_at = poll_at; + set_timer = true; + fast = NULL; + } + } + + if (!list_empty(&net->fs_probe_slow)) { + slow = list_first_entry(&net->fs_probe_slow, struct afs_server, probe_link); + poll_at = slow->probed_at + afs_fs_probe_slow_poll_interval; + if (time_before(nowj, poll_at)) { + if (time_before(poll_at, timer_at)) + timer_at = poll_at; + set_timer = true; + slow = NULL; + } + } + + server = fast ?: slow; + if (server) + _debug("probe %pU", &server->uuid); + + if (server && (first_pass || !need_resched())) { + afs_dispatch_fs_probe(net, server, server == fast); + first_pass = false; + goto again; + } + + write_sequnlock(&net->fs_lock); + + if (server) { + if (!queue_work(afs_wq, &net->fs_prober)) + afs_dec_servers_outstanding(net); + _leave(" [requeue]"); + } else if (set_timer) { + if (timer_reduce(&net->fs_probe_timer, timer_at)) + afs_dec_servers_outstanding(net); + _leave(" [timer]"); + } else { + afs_dec_servers_outstanding(net); + _leave(" [quiesce]"); + } +} + +/* + * Wait for a probe on a particular fileserver to complete for 2s. + */ +int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr) +{ + struct wait_queue_entry wait; + unsigned long timo = 2 * HZ; + + if (atomic_read(&server->probe_outstanding) == 0) + goto dont_wait; + + init_wait_entry(&wait, 0); + for (;;) { + prepare_to_wait_event(&server->probe_wq, &wait, + is_intr ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + if (timo == 0 || + server->probe.responded || + atomic_read(&server->probe_outstanding) == 0 || + (is_intr && signal_pending(current))) + break; + timo = schedule_timeout(timo); + } + + finish_wait(&server->probe_wq, &wait); + +dont_wait: + if (server->probe.responded) + return 0; + if (is_intr && signal_pending(current)) + return -ERESTARTSYS; + if (timo == 0) + return -ETIME; + return -EDESTADDRREQ; +} diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index d2b3798c1932..acb4d0ca2649 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -13,12 +13,6 @@ #include "internal.h" #include "afs_fs.h" #include "xdr_fs.h" -#include "protocol_yfs.h" - -static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi) -{ - call->cbi = afs_get_cb_interest(cbi); -} /* * decode an AFSFid block @@ -56,16 +50,15 @@ static void xdr_dump_bad(const __be32 *bp) /* * decode an AFSFetchStatus block */ -static int xdr_decode_AFSFetchStatus(const __be32 **_bp, - struct afs_call *call, - struct afs_status_cb *scb) +static void xdr_decode_AFSFetchStatus(const __be32 **_bp, + struct afs_call *call, + struct afs_status_cb *scb) { const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); u64 data_version, size; u32 type, abort_code; - int ret; abort_code = ntohl(xdr->abort_code); @@ -79,7 +72,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, */ status->abort_code = abort_code; scb->have_error = true; - goto good; + goto advance; } pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); @@ -89,7 +82,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, if (abort_code != 0 && inline_error) { status->abort_code = abort_code; scb->have_error = true; - goto good; + goto advance; } type = ntohl(xdr->type); @@ -125,15 +118,13 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, data_version |= (u64)ntohl(xdr->data_version_hi) << 32; status->data_version = data_version; scb->have_status = true; -good: - ret = 0; advance: *_bp = (const void *)*_bp + sizeof(*xdr); - return ret; + return; bad: xdr_dump_bad(*_bp); - ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(call, afs_eproto_bad_status); goto advance; } @@ -243,8 +234,10 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp, /* * deliver reply data to an FS.FetchStatus */ -static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) +static int afs_deliver_fs_fetch_status(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; const __be32 *bp; int ret; @@ -254,11 +247,9 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSCallBack(&bp, call, call->out_scb); - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSCallBack(&bp, call, &vp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -267,54 +258,39 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) /* * FS.FetchStatus operation type */ -static const struct afs_call_type afs_RXFSFetchStatus_vnode = { - .name = "FS.FetchStatus(vnode)", +static const 
struct afs_call_type afs_RXFSFetchStatus = { + .name = "FS.FetchStatus", .op = afs_FS_FetchStatus, - .deliver = afs_deliver_fs_fetch_status_vnode, + .deliver = afs_deliver_fs_fetch_status, .destructor = afs_flat_call_destructor, }; /* * fetch the status information for a file */ -int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb, - struct afs_volsync *volsync) +void afs_fs_fetch_status(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_fetch_file_status(fc, scb, volsync); - _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode, + call = afs_alloc_flat_call(op->net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = volsync; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHSTATUS); - bp[1] = htonl(vnode->fid.vid); - bp[2] = htonl(vnode->fid.vnode); - bp[3] = htonl(vnode->fid.unique); + bp[1] = htonl(vp->fid.vid); + bp[2] = htonl(vp->fid.vnode); + bp[3] = htonl(vp->fid.unique); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -322,7 +298,9 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb */ static int afs_deliver_fs_fetch_data(struct afs_call *call) { - struct afs_read *req = call->read_request; + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[0]; + struct afs_read *req = op->fetch.req; const __be32 *bp; unsigned int size; int ret; @@ -419,14 +397,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSCallBack(&bp, call, call->out_scb); - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSCallBack(&bp, call, &vp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); - req->data_version = call->out_scb->status.data_version; - req->file_size = call->out_scb->status.size; + req->data_version = vp->scb.status.data_version; + req->file_size = vp->scb.status.size; call->unmarshall++; @@ -449,14 +425,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) return 0; } -static void afs_fetch_data_destructor(struct afs_call *call) -{ - struct afs_read *req = call->read_request; - - afs_put_read(req); - afs_flat_call_destructor(call); -} - /* * FS.FetchData operation type */ @@ -464,102 +432,79 @@ static const struct afs_call_type afs_RXFSFetchData = { .name = "FS.FetchData", .op = afs_FS_FetchData, .deliver = afs_deliver_fs_fetch_data, - .destructor = afs_fetch_data_destructor, + .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSFetchData64 = { .name = "FS.FetchData64", .op = afs_FS_FetchData64, .deliver = afs_deliver_fs_fetch_data, - .destructor = 
afs_fetch_data_destructor, + .destructor = afs_flat_call_destructor, }; /* * fetch data from a very large file */ -static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, - struct afs_status_cb *scb, - struct afs_read *req) +static void afs_fs_fetch_data64(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; + struct afs_read *req = op->fetch.req; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(""); - call = afs_alloc_flat_call(net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = NULL; - call->read_request = afs_get_read(req); + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA64); - bp[1] = htonl(vnode->fid.vid); - bp[2] = htonl(vnode->fid.vnode); - bp[3] = htonl(vnode->fid.unique); + bp[1] = htonl(vp->fid.vid); + bp[2] = htonl(vp->fid.vnode); + bp[3] = htonl(vp->fid.unique); bp[4] = htonl(upper_32_bits(req->pos)); bp[5] = htonl(lower_32_bits(req->pos)); bp[6] = 0; bp[7] = htonl(lower_32_bits(req->len)); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * fetch data from a file */ -int afs_fs_fetch_data(struct afs_fs_cursor *fc, - struct afs_status_cb *scb, - struct afs_read *req) +void afs_fs_fetch_data(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct afs_read *req = op->fetch.req; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_fetch_data(fc, scb, req); - if (upper_32_bits(req->pos) || upper_32_bits(req->len) || upper_32_bits(req->pos + req->len)) - return afs_fs_fetch_data64(fc, scb, req); + return afs_fs_fetch_data64(op); _enter(""); - call = afs_alloc_flat_call(net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = NULL; - call->read_request = afs_get_read(req); + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA); - bp[1] = htonl(vnode->fid.vid); - bp[2] = htonl(vnode->fid.vnode); - bp[3] = htonl(vnode->fid.unique); + bp[1] = htonl(vp->fid.vid); + bp[2] = htonl(vp->fid.vnode); + bp[3] = htonl(vp->fid.unique); bp[4] = htonl(lower_32_bits(req->pos)); bp[5] = htonl(lower_32_bits(req->len)); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -567,6 +512,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, */ static int afs_deliver_fs_create_vnode(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; @@ -576,15 +524,11 @@ static int 
afs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - xdr_decode_AFSFid(&bp, call->out_fid); - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_AFSCallBack(&bp, call, call->out_scb); - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFid(&bp, &op->file[1].fid); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_AFSCallBack(&bp, call, &vp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -600,6 +544,52 @@ static const struct afs_call_type afs_RXFSCreateFile = { .destructor = afs_flat_call_destructor, }; +/* + * Create a file. + */ +void afs_fs_create_file(struct afs_operation *op) +{ + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_call *call; + size_t namesz, reqsz, padsz; + __be32 *bp; + + _enter(""); + + namesz = name->len; + padsz = (4 - (namesz & 3)) & 3; + reqsz = (5 * 4) + namesz + padsz + (6 * 4); + + call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile, + reqsz, (3 + 21 + 21 + 3 + 6) * 4); + if (!call) + return afs_op_nomem(op); + + /* marshall the parameters */ + bp = call->request; + *bp++ = htonl(FSCREATEFILE); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); + *bp++ = htonl(namesz); + memcpy(bp, name->name, namesz); + bp = (void *) bp + namesz; + if (padsz > 0) { + memset(bp, 0, padsz); + bp = (void *) bp + padsz; + } + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); + *bp++ = htonl(op->mtime.tv_sec); /* mtime */ + *bp++ = 0; /* owner */ + *bp++ = 0; /* group */ + *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */ + *bp++ = 0; /* segment size */ + + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); +} + static const struct afs_call_type afs_RXFSMakeDir = { .name = "FS.MakeDir", .op = afs_FS_MakeDir, @@ -608,80 +598,58 @@ static const struct afs_call_type afs_RXFSMakeDir = { }; /* - * create a file or make a directory + * Create a new directory */ -int afs_fs_create(struct afs_fs_cursor *fc, - const char *name, - umode_t mode, - struct afs_status_cb *dvnode_scb, - struct afs_fid *newfid, - struct afs_status_cb *new_scb) +void afs_fs_make_dir(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); size_t namesz, reqsz, padsz; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)){ - if (S_ISDIR(mode)) - return yfs_fs_make_dir(fc, name, mode, dvnode_scb, - newfid, new_scb); - else - return yfs_fs_create_file(fc, name, mode, dvnode_scb, - newfid, new_scb); - } - _enter(""); - namesz = strlen(name); + namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz + (6 * 4); - call = afs_alloc_flat_call( - net, S_ISDIR(mode) ? 
&afs_RXFSMakeDir : &afs_RXFSCreateFile, - reqsz, (3 + 21 + 21 + 3 + 6) * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSMakeDir, + reqsz, (3 + 21 + 21 + 3 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_fid = newfid; - call->out_scb = new_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; - *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE); - *bp++ = htonl(dvnode->fid.vid); - *bp++ = htonl(dvnode->fid.vnode); - *bp++ = htonl(dvnode->fid.unique); + *bp++ = htonl(FSMAKEDIR); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); - memcpy(bp, name, namesz); + memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); - *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */ + *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ - *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ + *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */ *bp++ = 0; /* segment size */ - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* - * Deliver reply data to any operation that returns directory status and volume - * sync. + * Deliver reply data to any operation that returns status and volume sync. */ -static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call) +static int afs_deliver_fs_file_status_and_vol(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[0]; const __be32 *bp; int ret; @@ -691,81 +659,108 @@ static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* - * FS.RemoveDir/FS.RemoveFile operation type + * FS.RemoveFile operation type */ static const struct afs_call_type afs_RXFSRemoveFile = { .name = "FS.RemoveFile", .op = afs_FS_RemoveFile, - .deliver = afs_deliver_fs_dir_status_and_vol, + .deliver = afs_deliver_fs_file_status_and_vol, .destructor = afs_flat_call_destructor, }; +/* + * Remove a file. 
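Each name in the marshalling above and below is sent as a 32-bit length word, the bytes themselves, and zero padding up to the next 4-byte boundary; a worked sketch of that size calculation (helper name hypothetical): a 4-byte name such as "fred" needs no padding, while a 5-byte name such as "alice" is followed by 3 zero bytes.

static size_t afs_example_xdr_string_size(size_t namesz)
{
	size_t padsz = (4 - (namesz & 3)) & 3;	/* 0..3 bytes of zero padding */

	return 4 + namesz + padsz;		/* length word + name + pad */
}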
+ */ +void afs_fs_remove_file(struct afs_operation *op) +{ + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_call *call; + size_t namesz, reqsz, padsz; + __be32 *bp; + + _enter(""); + + namesz = name->len; + padsz = (4 - (namesz & 3)) & 3; + reqsz = (5 * 4) + namesz + padsz; + + call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveFile, + reqsz, (21 + 6) * 4); + if (!call) + return afs_op_nomem(op); + + /* marshall the parameters */ + bp = call->request; + *bp++ = htonl(FSREMOVEFILE); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); + *bp++ = htonl(namesz); + memcpy(bp, name->name, namesz); + bp = (void *) bp + namesz; + if (padsz > 0) { + memset(bp, 0, padsz); + bp = (void *) bp + padsz; + } + + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); +} + static const struct afs_call_type afs_RXFSRemoveDir = { .name = "FS.RemoveDir", .op = afs_FS_RemoveDir, - .deliver = afs_deliver_fs_dir_status_and_vol, + .deliver = afs_deliver_fs_file_status_and_vol, .destructor = afs_flat_call_destructor, }; /* - * remove a file or directory + * Remove a directory. */ -int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode, - const char *name, bool isdir, struct afs_status_cb *dvnode_scb) +void afs_fs_remove_dir(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); size_t namesz, reqsz, padsz; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_remove(fc, vnode, name, isdir, dvnode_scb); - _enter(""); - namesz = strlen(name); + namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz; - call = afs_alloc_flat_call( - net, isdir ? &afs_RXFSRemoveDir : &afs_RXFSRemoveFile, - reqsz, (21 + 6) * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveDir, + reqsz, (21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; - *bp++ = htonl(isdir ? 
FSREMOVEDIR : FSREMOVEFILE); - *bp++ = htonl(dvnode->fid.vid); - *bp++ = htonl(dvnode->fid.vnode); - *bp++ = htonl(dvnode->fid.unique); + *bp++ = htonl(FSREMOVEDIR); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); - memcpy(bp, name, namesz); + memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -773,6 +768,9 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode, */ static int afs_deliver_fs_link(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; @@ -784,13 +782,9 @@ static int afs_deliver_fs_link(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -809,56 +803,44 @@ static const struct afs_call_type afs_RXFSLink = { /* * make a hard link */ -int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode, - const char *name, - struct afs_status_cb *dvnode_scb, - struct afs_status_cb *vnode_scb) +void afs_fs_link(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); size_t namesz, reqsz, padsz; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_link(fc, vnode, name, dvnode_scb, vnode_scb); - _enter(""); - namesz = strlen(name); + namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz + (3 * 4); - call = afs_alloc_flat_call(net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_scb = vnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSLINK); - *bp++ = htonl(dvnode->fid.vid); - *bp++ = htonl(dvnode->fid.vnode); - *bp++ = htonl(dvnode->fid.unique); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); - memcpy(bp, name, namesz); + memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &vnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + 
*bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); + + trace_afs_make_fs_call1(call, &vp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -866,6 +848,9 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode, */ static int afs_deliver_fs_symlink(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; @@ -877,14 +862,10 @@ static int afs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - xdr_decode_AFSFid(&bp, call->out_fid); - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFid(&bp, &vp->fid); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -903,75 +884,58 @@ static const struct afs_call_type afs_RXFSSymlink = { /* * create a symbolic link */ -int afs_fs_symlink(struct afs_fs_cursor *fc, - const char *name, - const char *contents, - struct afs_status_cb *dvnode_scb, - struct afs_fid *newfid, - struct afs_status_cb *new_scb) +void afs_fs_symlink(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); size_t namesz, reqsz, padsz, c_namesz, c_padsz; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_symlink(fc, name, contents, dvnode_scb, - newfid, new_scb); - _enter(""); - namesz = strlen(name); + namesz = name->len; padsz = (4 - (namesz & 3)) & 3; - c_namesz = strlen(contents); + c_namesz = strlen(op->create.symlink); c_padsz = (4 - (c_namesz & 3)) & 3; reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4); - call = afs_alloc_flat_call(net, &afs_RXFSSymlink, reqsz, + call = afs_alloc_flat_call(op->net, &afs_RXFSSymlink, reqsz, (3 + 21 + 21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_fid = newfid; - call->out_scb = new_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSYMLINK); - *bp++ = htonl(dvnode->fid.vid); - *bp++ = htonl(dvnode->fid.vnode); - *bp++ = htonl(dvnode->fid.unique); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); - memcpy(bp, name, namesz); + memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } *bp++ = htonl(c_namesz); - memcpy(bp, contents, c_namesz); + memcpy(bp, op->create.symlink, c_namesz); bp = (void *) bp + c_namesz; if (c_padsz > 0) { memset(bp, 0, c_padsz); bp = (void *) bp + c_padsz; } *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); - *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */ + *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = htonl(S_IRWXUGO); /* unix mode */ *bp++ = 0; /* segment size */ - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, 
GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -979,6 +943,9 @@ int afs_fs_symlink(struct afs_fs_cursor *fc, */ static int afs_deliver_fs_rename(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *orig_dvp = &op->file[0]; + struct afs_vnode_param *new_dvp = &op->file[1]; const __be32 *bp; int ret; @@ -986,17 +953,13 @@ static int afs_deliver_fs_rename(struct afs_call *call) if (ret < 0) return ret; + bp = call->buffer; /* If the two dirs are the same, we have two copies of the same status * report, so we just decode it twice. */ - bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &orig_dvp->scb); + xdr_decode_AFSFetchStatus(&bp, call, &new_dvp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -1015,31 +978,22 @@ static const struct afs_call_type afs_RXFSRename = { /* * Rename/move a file or directory. */ -int afs_fs_rename(struct afs_fs_cursor *fc, - const char *orig_name, - struct afs_vnode *new_dvnode, - const char *new_name, - struct afs_status_cb *orig_dvnode_scb, - struct afs_status_cb *new_dvnode_scb) +void afs_fs_rename(struct afs_operation *op) { - struct afs_vnode *orig_dvnode = fc->vnode; + struct afs_vnode_param *orig_dvp = &op->file[0]; + struct afs_vnode_param *new_dvp = &op->file[1]; + const struct qstr *orig_name = &op->dentry->d_name; + const struct qstr *new_name = &op->dentry_2->d_name; struct afs_call *call; - struct afs_net *net = afs_v2net(orig_dvnode); size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_rename(fc, orig_name, - new_dvnode, new_name, - orig_dvnode_scb, - new_dvnode_scb); - _enter(""); - o_namesz = strlen(orig_name); + o_namesz = orig_name->len; o_padsz = (4 - (o_namesz & 3)) & 3; - n_namesz = strlen(new_name); + n_namesz = new_name->len; n_padsz = (4 - (n_namesz & 3)) & 3; reqsz = (4 * 4) + @@ -1047,51 +1001,46 @@ int afs_fs_rename(struct afs_fs_cursor *fc, (3 * 4) + 4 + n_namesz + n_padsz; - call = afs_alloc_flat_call(net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = orig_dvnode_scb; - call->out_scb = new_dvnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSRENAME); - *bp++ = htonl(orig_dvnode->fid.vid); - *bp++ = htonl(orig_dvnode->fid.vnode); - *bp++ = htonl(orig_dvnode->fid.unique); + *bp++ = htonl(orig_dvp->fid.vid); + *bp++ = htonl(orig_dvp->fid.vnode); + *bp++ = htonl(orig_dvp->fid.unique); *bp++ = htonl(o_namesz); - memcpy(bp, orig_name, o_namesz); + memcpy(bp, orig_name->name, o_namesz); bp = (void *) bp + o_namesz; if (o_padsz > 0) { memset(bp, 0, o_padsz); bp = (void *) bp + o_padsz; } - *bp++ = htonl(new_dvnode->fid.vid); - *bp++ = htonl(new_dvnode->fid.vnode); - *bp++ = htonl(new_dvnode->fid.unique); + *bp++ = htonl(new_dvp->fid.vid); + *bp++ = htonl(new_dvp->fid.vnode); + *bp++ = htonl(new_dvp->fid.unique); *bp++ = htonl(n_namesz); - memcpy(bp, new_name, n_namesz); + memcpy(bp, new_name->name, n_namesz); bp = (void *) bp 
+ n_namesz; if (n_padsz > 0) { memset(bp, 0, n_padsz); bp = (void *) bp + n_padsz; } - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name); + afs_make_op_call(op, call, GFP_NOFS); } /* - * deliver reply data to an FS.StoreData + * Deliver reply data to FS.StoreData or FS.StoreStatus */ static int afs_deliver_fs_store_data(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[0]; const __be32 *bp; int ret; @@ -1103,10 +1052,8 @@ static int afs_deliver_fs_store_data(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -1132,90 +1079,69 @@ static const struct afs_call_type afs_RXFSStoreData64 = { /* * store a set of pages to a very large file */ -static int afs_fs_store_data64(struct afs_fs_cursor *fc, - struct address_space *mapping, - pgoff_t first, pgoff_t last, - unsigned offset, unsigned to, - loff_t size, loff_t pos, loff_t i_size, - struct afs_status_cb *scb) +static void afs_fs_store_data64(struct afs_operation *op, + loff_t pos, loff_t size, loff_t i_size) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &afs_RXFSStoreData64, + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64, (4 + 6 + 3 * 2) * 4, (21 + 6) * 4); if (!call) - return -ENOMEM; + return afs_op_nomem(op); - call->key = fc->key; - call->mapping = mapping; - call->first = first; - call->last = last; - call->first_offset = offset; - call->last_to = to; call->send_pages = true; - call->out_scb = scb; /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA64); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); *bp++ = htonl(AFS_SET_MTIME); /* mask */ - *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ + *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = 0; /* unix mode */ *bp++ = 0; /* segment size */ - *bp++ = htonl(pos >> 32); - *bp++ = htonl((u32) pos); - *bp++ = htonl(size >> 32); - *bp++ = htonl((u32) size); - *bp++ = htonl(i_size >> 32); - *bp++ = htonl((u32) i_size); - - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + *bp++ = htonl(upper_32_bits(pos)); + *bp++ = htonl(lower_32_bits(pos)); + *bp++ = htonl(upper_32_bits(size)); + *bp++ = htonl(lower_32_bits(size)); + *bp++ = htonl(upper_32_bits(i_size)); + *bp++ = htonl(lower_32_bits(i_size)); + + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * store a set of pages */ -int afs_fs_store_data(struct 
afs_fs_cursor *fc, struct address_space *mapping, - pgoff_t first, pgoff_t last, - unsigned offset, unsigned to, - struct afs_status_cb *scb) +void afs_fs_store_data(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); loff_t size, pos, i_size; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_store_data(fc, mapping, first, last, offset, to, scb); - _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - size = (loff_t)to - (loff_t)offset; - if (first != last) - size += (loff_t)(last - first) << PAGE_SHIFT; - pos = (loff_t)first << PAGE_SHIFT; - pos += offset; + size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset; + if (op->store.first != op->store.last) + size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT; + pos = (loff_t)op->store.first << PAGE_SHIFT; + pos += op->store.first_offset; - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vp->vnode->vfs_inode); if (pos + size > i_size) i_size = size + pos; @@ -1223,73 +1149,38 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, (unsigned long long) size, (unsigned long long) pos, (unsigned long long) i_size); - if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32) - return afs_fs_store_data64(fc, mapping, first, last, offset, to, - size, pos, i_size, scb); + if (upper_32_bits(pos) || upper_32_bits(i_size) || upper_32_bits(size) || + upper_32_bits(pos + size)) + return afs_fs_store_data64(op, pos, size, i_size); - call = afs_alloc_flat_call(net, &afs_RXFSStoreData, + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData, (4 + 6 + 3) * 4, (21 + 6) * 4); if (!call) - return -ENOMEM; + return afs_op_nomem(op); - call->key = fc->key; - call->mapping = mapping; - call->first = first; - call->last = last; - call->first_offset = offset; - call->last_to = to; call->send_pages = true; - call->out_scb = scb; /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); *bp++ = htonl(AFS_SET_MTIME); /* mask */ - *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ + *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = 0; /* unix mode */ *bp++ = 0; /* segment size */ - *bp++ = htonl(pos); - *bp++ = htonl(size); - *bp++ = htonl(i_size); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); -} - -/* - * deliver reply data to an FS.StoreStatus - */ -static int afs_deliver_fs_store_status(struct afs_call *call) -{ - const __be32 *bp; - int ret; - - _enter(""); + *bp++ = htonl(lower_32_bits(pos)); + *bp++ = htonl(lower_32_bits(size)); + *bp++ = htonl(lower_32_bits(i_size)); - ret = afs_transfer_reply(call); - if (ret < 0) - return ret; - - /* unmarshall the reply once we've received all of it */ - bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); - - _leave(" = 0 [done]"); - return 0; + 
trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1298,21 +1189,21 @@ static int afs_deliver_fs_store_status(struct afs_call *call) static const struct afs_call_type afs_RXFSStoreStatus = { .name = "FS.StoreStatus", .op = afs_FS_StoreStatus, - .deliver = afs_deliver_fs_store_status, + .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSStoreData_as_Status = { .name = "FS.StoreData", .op = afs_FS_StoreData, - .deliver = afs_deliver_fs_store_status, + .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSStoreData64_as_Status = { .name = "FS.StoreData64", .op = afs_FS_StoreData64, - .deliver = afs_deliver_fs_store_status, + .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; @@ -1320,85 +1211,74 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = { * set the attributes on a very large file, using FS.StoreData rather than * FS.StoreStatus so as to alter the file size also */ -static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr, - struct afs_status_cb *scb) +static void afs_fs_setattr_size64(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct iattr *attr = op->setattr.attr; __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); - call = afs_alloc_flat_call(net, &afs_RXFSStoreData64_as_Status, + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64_as_Status, (4 + 6 + 3 * 2) * 4, (21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA64); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); xdr_encode_AFS_StoreStatus(&bp, attr); - *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */ - *bp++ = htonl((u32) attr->ia_size); - *bp++ = 0; /* size of write */ + *bp++ = htonl(upper_32_bits(attr->ia_size)); /* position of start of write */ + *bp++ = htonl(lower_32_bits(attr->ia_size)); + *bp++ = 0; /* size of write */ *bp++ = 0; - *bp++ = htonl(attr->ia_size >> 32); /* new file length */ - *bp++ = htonl((u32) attr->ia_size); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + *bp++ = htonl(upper_32_bits(attr->ia_size)); /* new file length */ + *bp++ = htonl(lower_32_bits(attr->ia_size)); + + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * set the attributes on a file, using FS.StoreData rather than FS.StoreStatus * so as to alter the file size also */ -static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr, - struct afs_status_cb *scb) +static void afs_fs_setattr_size(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct iattr *attr = op->setattr.attr; 
__be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); - if (attr->ia_size >> 32) - return afs_fs_setattr_size64(fc, attr, scb); + if (upper_32_bits(attr->ia_size)) + return afs_fs_setattr_size64(op); - call = afs_alloc_flat_call(net, &afs_RXFSStoreData_as_Status, + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status, (4 + 6 + 3) * 4, (21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); xdr_encode_AFS_StoreStatus(&bp, attr); @@ -1406,57 +1286,44 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr, *bp++ = 0; /* size of write */ *bp++ = htonl(attr->ia_size); /* new file length */ - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * set the attributes on a file, using FS.StoreData if there's a change in file * size, and FS.StoreStatus otherwise */ -int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr, - struct afs_status_cb *scb) +void afs_fs_setattr(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct iattr *attr = op->setattr.attr; __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_setattr(fc, attr, scb); - if (attr->ia_valid & ATTR_SIZE) - return afs_fs_setattr_size(fc, attr, scb); + return afs_fs_setattr_size(op); _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus, + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreStatus, (4 + 6) * 4, (21 + 6) * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTORESTATUS); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); - xdr_encode_AFS_StoreStatus(&bp, attr); + xdr_encode_AFS_StoreStatus(&bp, op->setattr.attr); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1464,6 +1331,7 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr, */ static int afs_deliver_fs_get_volume_status(struct afs_call *call) { + struct afs_operation *op = call->op; const __be32 *bp; char *p; u32 size; @@ -1485,7 +1353,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) return ret; bp = call->buffer; - xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus); + 
xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); /* Fall through */ @@ -1499,8 +1367,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) call->count = ntohl(call->tmp); _debug("volname length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_volname_len); + return afs_protocol_error(call, afs_eproto_volname_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; @@ -1529,8 +1396,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) call->count = ntohl(call->tmp); _debug("offline msg length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_offline_msg_len); + return afs_protocol_error(call, afs_eproto_offline_msg_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; @@ -1560,8 +1426,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) call->count = ntohl(call->tmp); _debug("motd length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_motd_len); + return afs_protocol_error(call, afs_eproto_motd_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; @@ -1601,37 +1466,26 @@ static const struct afs_call_type afs_RXFSGetVolumeStatus = { /* * fetch the status of a volume */ -int afs_fs_get_volume_status(struct afs_fs_cursor *fc, - struct afs_volume_status *vs) +void afs_fs_get_volume_status(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_get_volume_status(fc, vs); - _enter(""); - call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4, + call = afs_alloc_flat_call(op->net, &afs_RXFSGetVolumeStatus, 2 * 4, max(12 * 4, AFSOPAQUEMAX + 1)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_volstatus = vs; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSGETVOLUMESTATUS); - bp[1] = htonl(vnode->fid.vid); + bp[1] = htonl(vp->fid.vid); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1639,6 +1493,7 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc, */ static int afs_deliver_fs_xxxx_lock(struct afs_call *call) { + struct afs_operation *op = call->op; const __be32 *bp; int ret; @@ -1650,7 +1505,7 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -1691,114 +1546,80 @@ static const struct afs_call_type afs_RXFSReleaseLock = { /* * Set a lock on a file */ -int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type, - struct afs_status_cb *scb) +void afs_fs_set_lock(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = 
afs_v2net(vnode); __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_set_lock(fc, type, scb); - _enter(""); - call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSSetLock, 5 * 4, 6 * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->lvnode = vnode; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSETLOCK); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); - *bp++ = htonl(type); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_calli(call, &vnode->fid, type); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); + *bp++ = htonl(op->lock.type); + + trace_afs_make_fs_calli(call, &vp->fid, op->lock.type); + afs_make_op_call(op, call, GFP_NOFS); } /* * extend a lock on a file */ -int afs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb) +void afs_fs_extend_lock(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_extend_lock(fc, scb); - _enter(""); - call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSExtendLock, 4 * 4, 6 * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->lvnode = vnode; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSEXTENDLOCK); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); + + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * release a lock on a file */ -int afs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb) +void afs_fs_release_lock(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_release_lock(fc, scb); - _enter(""); - call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4); + call = afs_alloc_flat_call(op->net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->lvnode = vnode; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSRELEASELOCK); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); + + 
trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1842,7 +1663,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net, bp = call->request; *bp++ = htonl(FSGIVEUPALLCALLBACKS); - /* Can't take a ref on server */ + call->server = afs_use_server(server, afs_server_trace_give_up_cb); afs_make_call(ac, call, GFP_NOFS); return afs_wait_for_call_to_complete(call, ac); } @@ -1905,14 +1726,13 @@ static const struct afs_call_type afs_RXFSGetCapabilities = { }; /* - * Probe a fileserver for the capabilities that it supports. This can - * return up to 196 words. + * Probe a fileserver for the capabilities that it supports. This RPC can + * reply with up to 196 words. The operation is asynchronous and if we managed + * to allocate a call, true is returned and the result is delivered through the + * ->done() - otherwise we return false to indicate we didn't even try. */ -struct afs_call *afs_fs_get_capabilities(struct afs_net *net, - struct afs_server *server, - struct afs_addr_cursor *ac, - struct key *key, - unsigned int server_index) +bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server, + struct afs_addr_cursor *ac, struct key *key) { struct afs_call *call; __be32 *bp; @@ -1921,11 +1741,10 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net, call = afs_alloc_flat_call(net, &afs_RXFSGetCapabilities, 1 * 4, 16 * 4); if (!call) - return ERR_PTR(-ENOMEM); + return false; call->key = key; - call->server = afs_get_server(server, afs_server_trace_get_caps); - call->server_index = server_index; + call->server = afs_use_server(server, afs_server_trace_get_caps); call->upgrade = true; call->async = true; call->max_lifespan = AFS_PROBE_MAX_LIFESPAN; @@ -1934,87 +1753,10 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net, bp = call->request; *bp++ = htonl(FSGETCAPABILITIES); - /* Can't take a ref on server */ trace_afs_make_fs_call(call, NULL); afs_make_call(ac, call, GFP_NOFS); - return call; -} - -/* - * Deliver reply data to an FS.FetchStatus with no vnode. - */ -static int afs_deliver_fs_fetch_status(struct afs_call *call) -{ - const __be32 *bp; - int ret; - - ret = afs_transfer_reply(call); - if (ret < 0) - return ret; - - /* unmarshall the reply once we've received all of it */ - bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSCallBack(&bp, call, call->out_scb); - xdr_decode_AFSVolSync(&bp, call->out_volsync); - - _leave(" = 0 [done]"); - return 0; -} - -/* - * FS.FetchStatus operation type - */ -static const struct afs_call_type afs_RXFSFetchStatus = { - .name = "FS.FetchStatus", - .op = afs_FS_FetchStatus, - .deliver = afs_deliver_fs_fetch_status, - .destructor = afs_flat_call_destructor, -}; - -/* - * Fetch the status information for a fid without needing a vnode handle. 
- */ -int afs_fs_fetch_status(struct afs_fs_cursor *fc, - struct afs_net *net, - struct afs_fid *fid, - struct afs_status_cb *scb, - struct afs_volsync *volsync) -{ - struct afs_call *call; - __be32 *bp; - - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_fetch_status(fc, net, fid, scb, volsync); - - _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), fid->vid, fid->vnode); - - call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_fid = fid; - call->out_scb = scb; - call->out_volsync = volsync; - - /* marshall the parameters */ - bp = call->request; - bp[0] = htonl(FSFETCHSTATUS); - bp[1] = htonl(fid->vid); - bp[2] = htonl(fid->vnode); - bp[3] = htonl(fid->unique); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + afs_put_call(call); + return true; } /* @@ -2022,6 +1764,7 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc, */ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) { + struct afs_operation *op = call->op; struct afs_status_cb *scb; const __be32 *bp; u32 tmp; @@ -2043,10 +1786,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) return ret; tmp = ntohl(call->tmp); - _debug("status count: %u/%u", tmp, call->count2); - if (tmp != call->count2) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_ibulkst_count); + _debug("status count: %u/%u", tmp, op->nr_files); + if (tmp != op->nr_files) + return afs_protocol_error(call, afs_eproto_ibulkst_count); call->count = 0; call->unmarshall++; @@ -2060,14 +1802,23 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) if (ret < 0) return ret; + switch (call->count) { + case 0: + scb = &op->file[0].scb; + break; + case 1: + scb = &op->file[1].scb; + break; + default: + scb = &op->more_files[call->count - 2].scb; + break; + } + bp = call->buffer; - scb = &call->out_scb[call->count]; - ret = xdr_decode_AFSFetchStatus(&bp, call, scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, scb); call->count++; - if (call->count < call->count2) + if (call->count < op->nr_files) goto more_counts; call->count = 0; @@ -2084,9 +1835,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); - if (tmp != call->count2) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_ibulkst_cb_count); + if (tmp != op->nr_files) + return afs_protocol_error(call, afs_eproto_ibulkst_cb_count); call->count = 0; call->unmarshall++; more_cbs: @@ -2100,11 +1850,22 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) return ret; _debug("unmarshall CB array"); + switch (call->count) { + case 0: + scb = &op->file[0].scb; + break; + case 1: + scb = &op->file[1].scb; + break; + default: + scb = &op->more_files[call->count - 2].scb; + break; + } + bp = call->buffer; - scb = &call->out_scb[call->count]; xdr_decode_AFSCallBack(&bp, call, scb); call->count++; - if (call->count < call->count2) + if (call->count < op->nr_files) goto more_cbs; afs_extract_to_buf(call, 6 * sizeof(__be32)); @@ -2117,7 +1878,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) return ret; bp = call->buffer; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSVolSync(&bp, &op->volsync); call->unmarshall++; @@ 
-2129,6 +1890,16 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) return 0; } +static void afs_done_fs_inline_bulk_status(struct afs_call *call) +{ + if (call->error == -ECONNABORTED && + call->abort_code == RX_INVALID_OPERATION) { + set_bit(AFS_SERVER_FL_NO_IBULK, &call->server->flags); + if (call->op) + set_bit(AFS_VOLUME_MAYBE_NO_IBULK, &call->op->volume->flags); + } +} + /* * FS.InlineBulkStatus operation type */ @@ -2136,58 +1907,53 @@ static const struct afs_call_type afs_RXFSInlineBulkStatus = { .name = "FS.InlineBulkStatus", .op = afs_FS_InlineBulkStatus, .deliver = afs_deliver_fs_inline_bulk_status, + .done = afs_done_fs_inline_bulk_status, .destructor = afs_flat_call_destructor, }; /* * Fetch the status information for up to 50 files */ -int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc, - struct afs_net *net, - struct afs_fid *fids, - struct afs_status_cb *statuses, - unsigned int nr_fids, - struct afs_volsync *volsync) +void afs_fs_inline_bulk_status(struct afs_operation *op) { + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; __be32 *bp; int i; - if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) - return yfs_fs_inline_bulk_status(fc, net, fids, statuses, - nr_fids, volsync); + if (test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags)) { + op->error = -ENOTSUPP; + return; + } _enter(",%x,{%llx:%llu},%u", - key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids); + key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files); - call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus, - (2 + nr_fids * 3) * 4, + call = afs_alloc_flat_call(op->net, &afs_RXFSInlineBulkStatus, + (2 + op->nr_files * 3) * 4, 21 * 4); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = statuses; - call->out_volsync = volsync; - call->count2 = nr_fids; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSINLINEBULKSTATUS); - *bp++ = htonl(nr_fids); - for (i = 0; i < nr_fids; i++) { - *bp++ = htonl(fids[i].vid); - *bp++ = htonl(fids[i].vnode); - *bp++ = htonl(fids[i].unique); + *bp++ = htonl(op->nr_files); + *bp++ = htonl(dvp->fid.vid); + *bp++ = htonl(dvp->fid.vnode); + *bp++ = htonl(dvp->fid.unique); + *bp++ = htonl(vp->fid.vid); + *bp++ = htonl(vp->fid.vnode); + *bp++ = htonl(vp->fid.unique); + for (i = 0; i < op->nr_files - 2; i++) { + *bp++ = htonl(op->more_files[i].fid.vid); + *bp++ = htonl(op->more_files[i].fid.vnode); + *bp++ = htonl(op->more_files[i].fid.unique); } - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &fids[0]); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -2195,6 +1961,8 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc, */ static int afs_deliver_fs_fetch_acl(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[0]; struct afs_acl *acl; const __be32 *bp; unsigned int size; @@ -2220,7 +1988,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL); if (!acl) return -ENOMEM; - call->ret_acl = acl; + op->acl = acl; acl->size = call->count2; afs_extract_begin(call, acl->data, size); call->unmarshall++; @@ -2243,10 +2011,8 @@ static int afs_deliver_fs_fetch_acl(struct 
afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); + xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_AFSVolSync(&bp, &op->volsync); call->unmarshall++; @@ -2258,12 +2024,6 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) return 0; } -static void afs_destroy_fs_fetch_acl(struct afs_call *call) -{ - kfree(call->ret_acl); - afs_flat_call_destructor(call); -} - /* * FS.FetchACL operation type */ @@ -2271,68 +2031,33 @@ static const struct afs_call_type afs_RXFSFetchACL = { .name = "FS.FetchACL", .op = afs_FS_FetchACL, .deliver = afs_deliver_fs_fetch_acl, - .destructor = afs_destroy_fs_fetch_acl, }; /* * Fetch the ACL for a file. */ -struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc, - struct afs_status_cb *scb) +void afs_fs_fetch_acl(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &afs_RXFSFetchACL, 16, (21 + 6) * 4); - if (!call) { - fc->ac.error = -ENOMEM; - return ERR_PTR(-ENOMEM); - } - - call->key = fc->key; - call->ret_acl = NULL; - call->out_scb = scb; - call->out_volsync = NULL; + call = afs_alloc_flat_call(op->net, &afs_RXFSFetchACL, 16, (21 + 6) * 4); + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHACL); - bp[1] = htonl(vnode->fid.vid); - bp[2] = htonl(vnode->fid.vnode); - bp[3] = htonl(vnode->fid.unique); - - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_make_call(&fc->ac, call, GFP_KERNEL); - return (struct afs_acl *)afs_wait_for_call_to_complete(call, &fc->ac); -} - -/* - * Deliver reply data to any operation that returns file status and volume - * sync. - */ -static int afs_deliver_fs_file_status_and_vol(struct afs_call *call) -{ - const __be32 *bp; - int ret; + bp[1] = htonl(vp->fid.vid); + bp[2] = htonl(vp->fid.vnode); + bp[3] = htonl(vp->fid.unique); - ret = afs_transfer_reply(call); - if (ret < 0) - return ret; - - bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_AFSVolSync(&bp, call->out_volsync); - - _leave(" = 0 [done]"); - return 0; + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_KERNEL); } /* @@ -2348,42 +2073,34 @@ static const struct afs_call_type afs_RXFSStoreACL = { /* * Fetch the ACL for a file. 
*/ -int afs_fs_store_acl(struct afs_fs_cursor *fc, const struct afs_acl *acl, - struct afs_status_cb *scb) +void afs_fs_store_acl(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + const struct afs_acl *acl = op->acl; size_t size; __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); size = round_up(acl->size, 4); - call = afs_alloc_flat_call(net, &afs_RXFSStoreACL, + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreACL, 5 * 4 + size, (21 + 6) * 4); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = NULL; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSSTOREACL); - bp[1] = htonl(vnode->fid.vid); - bp[2] = htonl(vnode->fid.vnode); - bp[3] = htonl(vnode->fid.unique); + bp[1] = htonl(vp->fid.vid); + bp[2] = htonl(vp->fid.vnode); + bp[3] = htonl(vp->fid.unique); bp[4] = htonl(acl->size); memcpy(&bp[5], acl->data, acl->size); if (acl->size != size) memset((void *)&bp[5] + acl->size, 0, size - acl->size); - trace_afs_make_fs_call(call, &vnode->fid); - afs_make_call(&fc->ac, call, GFP_KERNEL); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_KERNEL); } diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 281470fe1183..7dde703df40c 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -67,16 +67,18 @@ static void afs_set_i_size(struct afs_vnode *vnode, u64 size) /* * Initialise an inode from the vnode status. */ -static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, - struct afs_cb_interest *cbi, - struct afs_vnode *parent_vnode, - struct afs_status_cb *scb) +static int afs_inode_init_from_status(struct afs_operation *op, + struct afs_vnode_param *vp, + struct afs_vnode *vnode) { - struct afs_cb_interest *old_cbi = NULL; - struct afs_file_status *status = &scb->status; + struct afs_file_status *status = &vp->scb.status; struct inode *inode = AFS_VNODE_TO_I(vnode); struct timespec64 t; + _enter("{%llx:%llu.%u} %s", + vp->fid.vid, vp->fid.vnode, vp->fid.unique, + op->type ? op->type->name : "???"); + _debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu", status->type, status->nlink, @@ -86,12 +88,15 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, write_seqlock(&vnode->cb_lock); + vnode->cb_v_break = op->cb_v_break; + vnode->cb_s_break = op->cb_s_break; vnode->status = *status; t = status->mtime_client; inode->i_ctime = t; inode->i_mtime = t; inode->i_atime = t; + inode->i_flags |= S_NOATIME; inode->i_uid = make_kuid(&init_user_ns, status->owner); inode->i_gid = make_kgid(&init_user_ns, status->group); set_nlink(&vnode->vfs_inode, status->nlink); @@ -128,9 +133,9 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, inode_nohighmem(inode); break; default: - dump_vnode(vnode, parent_vnode); + dump_vnode(vnode, op->file[0].vnode != vnode ? 
op->file[0].vnode : NULL); write_sequnlock(&vnode->cb_lock); - return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type); + return afs_protocol_error(NULL, afs_eproto_file_type); } afs_set_i_size(vnode, status->size); @@ -138,39 +143,36 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, vnode->invalid_before = status->data_version; inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); - if (!scb->have_cb) { + if (!vp->scb.have_cb) { /* it's a symlink we just created (the fileserver * didn't give us a callback) */ vnode->cb_expires_at = ktime_get_real_seconds(); } else { - vnode->cb_expires_at = scb->callback.expires_at; - old_cbi = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->cb_lock.lock)); - if (cbi != old_cbi) - rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(cbi)); - else - old_cbi = NULL; + vnode->cb_expires_at = vp->scb.callback.expires_at; + vnode->cb_server = op->server; set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } write_sequnlock(&vnode->cb_lock); - afs_put_cb_interest(afs_v2net(vnode), old_cbi); return 0; } /* * Update the core inode struct from a returned status record. */ -static void afs_apply_status(struct afs_fs_cursor *fc, - struct afs_vnode *vnode, - struct afs_status_cb *scb, - const afs_dataversion_t *expected_version) +static void afs_apply_status(struct afs_operation *op, + struct afs_vnode_param *vp) { - struct afs_file_status *status = &scb->status; + struct afs_file_status *status = &vp->scb.status; + struct afs_vnode *vnode = vp->vnode; struct timespec64 t; umode_t mode; bool data_changed = false; + _enter("{%llx:%llu.%u} %s", + vp->fid.vid, vp->fid.vnode, vp->fid.unique, + op->type ? op->type->name : "???"); + BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); if (status->type != vnode->status.type) { @@ -179,7 +181,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, vnode->fid.vnode, vnode->fid.unique, status->type, vnode->status.type); - afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(NULL, afs_eproto_bad_status); return; } @@ -209,14 +211,13 @@ static void afs_apply_status(struct afs_fs_cursor *fc, vnode->status = *status; - if (expected_version && - *expected_version != status->data_version) { + if (vp->dv_before + vp->dv_delta != status->data_version) { if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s\n", vnode->fid.vid, vnode->fid.vnode, - (unsigned long long)*expected_version, + (unsigned long long)vp->dv_before + vp->dv_delta, (unsigned long long)status->data_version, - fc->type ? fc->type->name : "???"); + op->type ? op->type->name : "???"); vnode->invalid_before = status->data_version; if (vnode->status.type == AFS_FTYPE_DIR) { @@ -243,22 +244,15 @@ static void afs_apply_status(struct afs_fs_cursor *fc, /* * Apply a callback to a vnode. 
*/ -static void afs_apply_callback(struct afs_fs_cursor *fc, - struct afs_vnode *vnode, - struct afs_status_cb *scb, - unsigned int cb_break) +static void afs_apply_callback(struct afs_operation *op, + struct afs_vnode_param *vp) { - struct afs_cb_interest *old; - struct afs_callback *cb = &scb->callback; + struct afs_callback *cb = &vp->scb.callback; + struct afs_vnode *vnode = vp->vnode; - if (!afs_cb_is_broken(cb_break, vnode, fc->cbi)) { + if (!afs_cb_is_broken(vp->cb_break_before, vnode)) { vnode->cb_expires_at = cb->expires_at; - old = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->cb_lock.lock)); - if (old != fc->cbi) { - rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(fc->cbi)); - afs_put_cb_interest(afs_v2net(vnode), old); - } + vnode->cb_server = op->server; set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } } @@ -267,106 +261,108 @@ static void afs_apply_callback(struct afs_fs_cursor *fc, * Apply the received status and callback to an inode all in the same critical * section to avoid races with afs_validate(). */ -void afs_vnode_commit_status(struct afs_fs_cursor *fc, - struct afs_vnode *vnode, - unsigned int cb_break, - const afs_dataversion_t *expected_version, - struct afs_status_cb *scb) +void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *vp) { - if (fc->ac.error != 0) - return; + struct afs_vnode *vnode = vp->vnode; + + _enter(""); + + ASSERTCMP(op->error, ==, 0); write_seqlock(&vnode->cb_lock); - if (scb->have_error) { - if (scb->status.abort_code == VNOVNODE) { + if (vp->scb.have_error) { + if (vp->scb.status.abort_code == VNOVNODE) { set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_nlink(&vnode->vfs_inode); __afs_break_callback(vnode, afs_cb_break_for_deleted); } } else { - if (scb->have_status) - afs_apply_status(fc, vnode, scb, expected_version); - if (scb->have_cb) - afs_apply_callback(fc, vnode, scb, cb_break); + if (vp->scb.have_status) + afs_apply_status(op, vp); + if (vp->scb.have_cb) + afs_apply_callback(op, vp); } write_sequnlock(&vnode->cb_lock); - if (fc->ac.error == 0 && scb->have_status) - afs_cache_permit(vnode, fc->key, cb_break, scb); + if (op->error == 0 && vp->scb.have_status) + afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); } +static void afs_fetch_status_success(struct afs_operation *op) +{ + struct afs_vnode_param *vp = &op->file[0]; + struct afs_vnode *vnode = vp->vnode; + int ret; + + if (vnode->vfs_inode.i_state & I_NEW) { + ret = afs_inode_init_from_status(op, vp, vnode); + op->error = ret; + if (ret == 0) + afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); + } else { + afs_vnode_commit_status(op, vp); + } +} + +static const struct afs_operation_ops afs_fetch_status_operation = { + .issue_afs_rpc = afs_fs_fetch_status, + .issue_yfs_rpc = yfs_fs_fetch_status, + .success = afs_fetch_status_success, +}; + /* * Fetch file status from the volume. 
*/ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool is_new, afs_access_t *_caller_access) { - struct afs_status_cb *scb; - struct afs_fs_cursor fc; - int ret; + struct afs_operation *op; _enter("%s,{%llx:%llu.%u,S=%lx}", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, vnode->flags); - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - return -ENOMEM; + op = afs_alloc_operation(key, vnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - afs_dataversion_t data_version = vnode->status.data_version; + afs_op_set_vnode(op, 0, vnode); - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_fetch_file_status(&fc, scb, NULL); - } - - if (fc.error) { - /* Do nothing. */ - } else if (is_new) { - ret = afs_inode_init_from_status(vnode, key, fc.cbi, - NULL, scb); - fc.error = ret; - if (ret == 0) - afs_cache_permit(vnode, key, fc.cb_break, scb); - } else { - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - } - afs_check_for_remote_deletion(&fc, vnode); - ret = afs_end_vnode_operation(&fc); - } + op->nr_files = 1; + op->ops = &afs_fetch_status_operation; + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); - if (ret == 0 && _caller_access) - *_caller_access = scb->status.caller_access; - kfree(scb); - _leave(" = %d", ret); - return ret; + if (_caller_access) + *_caller_access = op->file[0].scb.status.caller_access; + return afs_put_operation(op); } /* - * iget5() comparator + * ilookup() comparator */ -int afs_iget5_test(struct inode *inode, void *opaque) +int afs_ilookup5_test_by_fid(struct inode *inode, void *opaque) { - struct afs_iget_data *iget_data = opaque; struct afs_vnode *vnode = AFS_FS_I(inode); + struct afs_fid *fid = opaque; - return memcmp(&vnode->fid, &iget_data->fid, sizeof(iget_data->fid)) == 0; + return (fid->vnode == vnode->fid.vnode && + fid->vnode_hi == vnode->fid.vnode_hi && + fid->unique == vnode->fid.unique); } /* - * iget5() comparator for inode created by autocell operations - * - * These pseudo inodes don't match anything. + * iget5() comparator */ -static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque) +static int afs_iget5_test(struct inode *inode, void *opaque) { - return 0; + struct afs_vnode_param *vp = opaque; + //struct afs_vnode *vnode = AFS_FS_I(inode); + + return afs_ilookup5_test_by_fid(inode, &vp->fid); } /* @@ -374,99 +370,22 @@ static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque) */ static int afs_iget5_set(struct inode *inode, void *opaque) { - struct afs_iget_data *iget_data = opaque; + struct afs_vnode_param *vp = opaque; + struct afs_super_info *as = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); - vnode->fid = iget_data->fid; - vnode->volume = iget_data->volume; - vnode->cb_v_break = iget_data->cb_v_break; - vnode->cb_s_break = iget_data->cb_s_break; + vnode->volume = as->volume; + vnode->fid = vp->fid; /* YFS supports 96-bit vnode IDs, but Linux only supports * 64-bit inode numbers. */ - inode->i_ino = iget_data->fid.vnode; - inode->i_generation = iget_data->fid.unique; + inode->i_ino = vnode->fid.vnode; + inode->i_generation = vnode->fid.unique; return 0; } /* - * Create an inode for a dynamic root directory or an autocell dynamic - * automount dir. 
- */ -struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) -{ - struct afs_super_info *as; - struct afs_vnode *vnode; - struct inode *inode; - static atomic_t afs_autocell_ino; - - struct afs_iget_data iget_data = { - .cb_v_break = 0, - .cb_s_break = 0, - }; - - _enter(""); - - as = sb->s_fs_info; - if (as->volume) { - iget_data.volume = as->volume; - iget_data.fid.vid = as->volume->vid; - } - if (root) { - iget_data.fid.vnode = 1; - iget_data.fid.unique = 1; - } else { - iget_data.fid.vnode = atomic_inc_return(&afs_autocell_ino); - iget_data.fid.unique = 0; - } - - inode = iget5_locked(sb, iget_data.fid.vnode, - afs_iget5_pseudo_dir_test, afs_iget5_set, - &iget_data); - if (!inode) { - _leave(" = -ENOMEM"); - return ERR_PTR(-ENOMEM); - } - - _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }", - inode, inode->i_ino, iget_data.fid.vid, iget_data.fid.vnode, - iget_data.fid.unique); - - vnode = AFS_FS_I(inode); - - /* there shouldn't be an existing inode */ - BUG_ON(!(inode->i_state & I_NEW)); - - inode->i_size = 0; - inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; - if (root) { - inode->i_op = &afs_dynroot_inode_operations; - inode->i_fop = &simple_dir_operations; - } else { - inode->i_op = &afs_autocell_inode_operations; - } - set_nlink(inode, 2); - inode->i_uid = GLOBAL_ROOT_UID; - inode->i_gid = GLOBAL_ROOT_GID; - inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode); - inode->i_blocks = 0; - inode_set_iversion_raw(inode, 0); - inode->i_generation = 0; - - set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags); - if (!root) { - set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); - inode->i_flags |= S_AUTOMOUNT; - } - - inode->i_flags |= S_NOATIME; - unlock_new_inode(inode); - _leave(" = %p", inode); - return inode; -} - -/* * Get a cache cookie for an inode. 
*/ static void afs_get_inode_cache(struct afs_vnode *vnode) @@ -501,58 +420,41 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) /* * inode retrieval */ -struct inode *afs_iget(struct super_block *sb, struct key *key, - struct afs_iget_data *iget_data, - struct afs_status_cb *scb, - struct afs_cb_interest *cbi, - struct afs_vnode *parent_vnode) +struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp) { - struct afs_super_info *as; + struct afs_vnode_param *dvp = &op->file[0]; + struct super_block *sb = dvp->vnode->vfs_inode.i_sb; struct afs_vnode *vnode; - struct afs_fid *fid = &iget_data->fid; struct inode *inode; int ret; - _enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique); + _enter(",{%llx:%llu.%u},,", vp->fid.vid, vp->fid.vnode, vp->fid.unique); - as = sb->s_fs_info; - iget_data->volume = as->volume; - - inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set, - iget_data); + inode = iget5_locked(sb, vp->fid.vnode, afs_iget5_test, afs_iget5_set, vp); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } - _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }", - inode, fid->vid, fid->vnode, fid->unique); - vnode = AFS_FS_I(inode); + _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }", + inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); + /* deal with an existing inode */ if (!(inode->i_state & I_NEW)) { _leave(" = %p", inode); return inode; } - if (!scb) { - /* it's a remotely extant inode */ - ret = afs_fetch_status(vnode, key, true, NULL); - if (ret < 0) - goto bad_inode; - } else { - ret = afs_inode_init_from_status(vnode, key, cbi, parent_vnode, - scb); - if (ret < 0) - goto bad_inode; - } + ret = afs_inode_init_from_status(op, vp, vnode); + if (ret < 0) + goto bad_inode; afs_get_inode_cache(vnode); /* success */ clear_bit(AFS_VNODE_UNSET, &vnode->flags); - inode->i_flags |= S_NOATIME; unlock_new_inode(inode); _leave(" = %p", inode); return inode; @@ -564,6 +466,74 @@ bad_inode: return ERR_PTR(ret); } +static int afs_iget5_set_root(struct inode *inode, void *opaque) +{ + struct afs_super_info *as = AFS_FS_S(inode->i_sb); + struct afs_vnode *vnode = AFS_FS_I(inode); + + vnode->volume = as->volume; + vnode->fid.vid = as->volume->vid, + vnode->fid.vnode = 1; + vnode->fid.unique = 1; + inode->i_ino = 1; + inode->i_generation = 1; + return 0; +} + +/* + * Set up the root inode for a volume. This is always vnode 1, unique 1 within + * the volume. 
+ */ +struct inode *afs_root_iget(struct super_block *sb, struct key *key) +{ + struct afs_super_info *as = AFS_FS_S(sb); + struct afs_operation *op; + struct afs_vnode *vnode; + struct inode *inode; + int ret; + + _enter(",{%llx},,", as->volume->vid); + + inode = iget5_locked(sb, 1, NULL, afs_iget5_set_root, NULL); + if (!inode) { + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } + + _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid); + + BUG_ON(!(inode->i_state & I_NEW)); + + vnode = AFS_FS_I(inode); + vnode->cb_v_break = as->volume->cb_v_break, + + op = afs_alloc_operation(key, as->volume); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto error; + } + + afs_op_set_vnode(op, 0, vnode); + + op->nr_files = 1; + op->ops = &afs_fetch_status_operation; + ret = afs_do_sync_operation(op); + if (ret < 0) + goto error; + + afs_get_inode_cache(vnode); + + clear_bit(AFS_VNODE_UNSET, &vnode->flags); + unlock_new_inode(inode); + _leave(" = %p", inode); + return inode; + +error: + iget_failed(inode); + _leave(" = %d [bad]", ret); + return ERR_PTR(ret); +} + /* * mark the data attached to an inode as obsolete due to a write on the server * - might also want to ditch all the outstanding writes and dirty pages @@ -586,12 +556,30 @@ void afs_zap_data(struct afs_vnode *vnode) } /* + * Get the server reinit counter for a vnode's current server. + */ +static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break) +{ + struct afs_server_list *slist = rcu_dereference(vnode->volume->servers); + struct afs_server *server; + int i; + + for (i = 0; i < slist->nr_servers; i++) { + server = slist->servers[i].server; + if (server == vnode->cb_server) { + *_s_break = READ_ONCE(server->cb_s_break); + return true; + } + } + + return false; +} + +/* * Check the validity of a vnode/inode. 
*/ bool afs_check_validity(struct afs_vnode *vnode) { - struct afs_cb_interest *cbi; - struct afs_server *server; struct afs_volume *volume = vnode->volume; enum afs_cb_break_reason need_clear = afs_cb_break_no_break; time64_t now = ktime_get_real_seconds(); @@ -604,11 +592,8 @@ bool afs_check_validity(struct afs_vnode *vnode) cb_v_break = READ_ONCE(volume->cb_v_break); cb_break = vnode->cb_break; - if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { - cbi = rcu_dereference(vnode->cb_interest); - server = rcu_dereference(cbi->server); - cb_s_break = READ_ONCE(server->cb_s_break); - + if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) && + afs_get_s_break_rcu(vnode, &cb_s_break)) { if (vnode->cb_s_break != cb_s_break || vnode->cb_v_break != cb_v_break) { vnode->cb_s_break = cb_s_break; @@ -755,7 +740,6 @@ int afs_drop_inode(struct inode *inode) */ void afs_evict_inode(struct inode *inode) { - struct afs_cb_interest *cbi; struct afs_vnode *vnode; vnode = AFS_FS_I(inode); @@ -772,15 +756,6 @@ void afs_evict_inode(struct inode *inode) truncate_inode_pages_final(&inode->i_data); clear_inode(inode); - write_seqlock(&vnode->cb_lock); - cbi = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->cb_lock.lock)); - if (cbi) { - afs_put_cb_interest(afs_i2net(inode), cbi); - rcu_assign_pointer(vnode->cb_interest, NULL); - } - write_sequnlock(&vnode->cb_lock); - while (!list_empty(&vnode->wb_keys)) { struct afs_wb_key *wbk = list_entry(vnode->wb_keys.next, struct afs_wb_key, vnode_link); @@ -808,16 +783,24 @@ void afs_evict_inode(struct inode *inode) _leave(""); } +static void afs_setattr_success(struct afs_operation *op) +{ + afs_vnode_commit_status(op, &op->file[0]); +} + +static const struct afs_operation_ops afs_setattr_operation = { + .issue_afs_rpc = afs_fs_setattr, + .issue_yfs_rpc = yfs_fs_setattr, + .success = afs_setattr_success, +}; + /* * set the attributes of an inode */ int afs_setattr(struct dentry *dentry, struct iattr *attr) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - struct key *key; - int ret = -ENOMEM; _enter("{%llx:%llu},{n=%pd},%x", vnode->fid.vid, vnode->fid.vnode, dentry, @@ -829,48 +812,22 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr) return 0; } - scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL); - if (!scb) - goto error; - /* flush any dirty data outstanding on a regular file */ if (S_ISREG(vnode->vfs_inode.i_mode)) filemap_write_and_wait(vnode->vfs_inode.i_mapping); - if (attr->ia_valid & ATTR_FILE) { - key = afs_file_key(attr->ia_file); - } else { - key = afs_request_key(vnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; - } - } - - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, false)) { - afs_dataversion_t data_version = vnode->status.data_version; + op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ? 
+ afs_file_key(attr->ia_file) : NULL), + vnode->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - if (attr->ia_valid & ATTR_SIZE) - data_version++; + afs_op_set_vnode(op, 0, vnode); + op->setattr.attr = attr; - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_setattr(&fc, attr, scb); - } - - afs_check_for_remote_deletion(&fc, vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - ret = afs_end_vnode_operation(&fc); - } + if (attr->ia_valid & ATTR_SIZE) + op->file[0].dv_delta = 1; - if (!(attr->ia_valid & ATTR_FILE)) - key_put(key); - -error_scb: - kfree(scb); -error: - _leave(" = %d", ret); - return ret; + op->ops = &afs_setattr_operation; + return afs_do_sync_operation(op); } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 80255513e230..e1621b0670cc 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -59,13 +59,6 @@ struct afs_fs_context { struct key *key; /* key to use for secure mounting */ }; -struct afs_iget_data { - struct afs_fid fid; - struct afs_volume *volume; /* volume on which resides */ - unsigned int cb_v_break; /* Pre-fetch volume break count */ - unsigned int cb_s_break; /* Pre-fetch server break count */ -}; - enum afs_call_state { AFS_CALL_CL_REQUESTING, /* Client: Request is being sent */ AFS_CALL_CL_AWAIT_REPLY, /* Client: Awaiting reply */ @@ -90,7 +83,6 @@ struct afs_addr_list { unsigned char nr_ipv4; /* Number of IPv4 addresses */ enum dns_record_source source:8; enum dns_lookup_status status:8; - unsigned long probed; /* Mask of servers that have been probed */ unsigned long failed; /* Mask of addrs that failed locally/ICMP */ unsigned long responded; /* Mask of addrs that responded */ struct sockaddr_rxrpc addrs[]; @@ -111,10 +103,7 @@ struct afs_call { struct afs_net *net; /* The network namespace */ struct afs_server *server; /* The fileserver record if fs op (pins ref) */ struct afs_vlserver *vlserver; /* The vlserver record if vl op */ - struct afs_cb_interest *cbi; /* Callback interest for server used */ - struct afs_vnode *lvnode; /* vnode being locked */ void *request; /* request data (first part) */ - struct address_space *mapping; /* Pages being written from */ struct iov_iter def_iter; /* Default buffer/data iterator */ struct iov_iter *iter; /* Iterator currently in use */ union { /* Convenience for ->def_iter */ @@ -126,32 +115,19 @@ struct afs_call { long ret0; /* Value to reply with instead of 0 */ struct afs_addr_list *ret_alist; struct afs_vldb_entry *ret_vldb; - struct afs_acl *ret_acl; + char *ret_str; }; - struct afs_fid *out_fid; - struct afs_status_cb *out_dir_scb; - struct afs_status_cb *out_scb; - struct yfs_acl *out_yacl; - struct afs_volsync *out_volsync; - struct afs_volume_status *out_volstatus; - struct afs_read *read_request; + struct afs_operation *op; unsigned int server_index; - pgoff_t first; /* first page in mapping to deal with */ - pgoff_t last; /* last page in mapping to deal with */ atomic_t usage; enum afs_call_state state; spinlock_t state_lock; int error; /* error code */ u32 abort_code; /* Remote abort ID or 0 */ - u32 epoch; unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */ unsigned request_size; /* size of request data */ unsigned reply_max; /* maximum size of reply */ - unsigned first_offset; /* offset into mapping[first] */ - union { - unsigned last_to; /* amount of mapping[last] */ - unsigned count2; /* count used in unmarshalling */ - }; + unsigned count2; /* count used in unmarshalling */ unsigned char 
unmarshall; /* unmarshalling phase */ unsigned char addr_ix; /* Address in ->alist */ bool drop_ref; /* T if need to drop ref for incoming call */ @@ -161,6 +137,7 @@ struct afs_call { bool upgrade; /* T to request service upgrade */ bool have_reply_time; /* T if have got reply_time */ bool intr; /* T if interruptible */ + bool unmarshalling_error; /* T if an unmarshalling error occurred */ u16 service_id; /* Actual service ID (after upgrade) */ unsigned int debug_id; /* Trace ID */ u32 operation_ID; /* operation ID for an incoming call */ @@ -291,6 +268,7 @@ struct afs_net { struct timer_list cells_timer; atomic_t cells_outstanding; seqlock_t cells_lock; + struct mutex cells_alias_lock; struct mutex proc_cells_lock; struct hlist_head proc_cells; @@ -299,9 +277,10 @@ struct afs_net { * cell, but in practice, people create aliases and subsets and there's * no easy way to distinguish them. */ - seqlock_t fs_lock; /* For fs_servers */ + seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */ struct rb_root fs_servers; /* afs_server (by server UUID or address) */ - struct list_head fs_updates; /* afs_server (by update_at) */ + struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */ + struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */ struct hlist_head fs_proc; /* procfs servers list */ struct hlist_head fs_addresses4; /* afs_server (by lowest IPv4 addr) */ @@ -310,6 +289,9 @@ struct afs_net { struct work_struct fs_manager; struct timer_list fs_timer; + + struct work_struct fs_prober; + struct timer_list fs_probe_timer; atomic_t servers_outstanding; /* File locking renewal management */ @@ -360,8 +342,10 @@ enum afs_cell_state { * for authentication and encryption. The cell name is not typically used in * the protocol. * - * There is no easy way to determine if two cells are aliases or one is a - * subset of another. + * Two cells are determined to be aliases if they have an explicit alias (YFS + * only), share any VL servers in common or have at least one volume in common. + * "In common" means that the address list of the VL servers or the fileservers + * share at least one endpoint. */ struct afs_cell { union { @@ -369,6 +353,8 @@ struct afs_cell { struct rb_node net_node; /* Node in net->cells */ }; struct afs_net *net; + struct afs_cell *alias_of; /* The cell this is an alias of */ + struct afs_volume *root_volume; /* The root.cell volume if there is one */ struct key *anonymous_key; /* anonymous user key for this cell */ struct work_struct manager; /* Manager for init/deinit/dns */ struct hlist_node proc_link; /* /proc cell list link */ @@ -381,15 +367,21 @@ struct afs_cell { unsigned long flags; #define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */ #define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */ +#define AFS_CELL_FL_CHECK_ALIAS 2 /* Need to check for aliases */ enum afs_cell_state state; short error; enum dns_record_source dns_source:8; /* Latest source of data from lookup */ enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */ unsigned int dns_lookup_count; /* Counter of DNS lookups */ + /* The volumes belonging to this cell */ + struct rb_root volumes; /* Tree of volumes on this server */ + struct hlist_head proc_volumes; /* procfs volume list */ + seqlock_t volume_lock; /* For volumes */ + /* Active fileserver interaction state. 
*/ - struct list_head proc_volumes; /* procfs volume list */ - rwlock_t proc_lock; + struct rb_root fs_servers; /* afs_server (by server UUID) */ + seqlock_t fs_lock; /* For fs_servers */ /* VL server list. */ rwlock_t vl_servers_lock; /* Lock on vl_servers */ @@ -471,6 +463,7 @@ struct afs_vldb_entry { #define AFS_VLDB_QUERY_ERROR 4 /* - VL server returned error */ uuid_t fs_server[AFS_NMAXNSERVERS]; + u32 addr_version[AFS_NMAXNSERVERS]; /* Registration change counters */ u8 fs_mask[AFS_NMAXNSERVERS]; #define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */ #define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */ @@ -492,94 +485,64 @@ struct afs_server { }; struct afs_addr_list __rcu *addresses; - struct rb_node uuid_rb; /* Link in net->servers */ + struct afs_cell *cell; /* Cell to which belongs (pins ref) */ + struct rb_node uuid_rb; /* Link in net->fs_servers */ + struct afs_server __rcu *uuid_next; /* Next server with same UUID */ + struct afs_server *uuid_prev; /* Previous server with same UUID */ + struct list_head probe_link; /* Link in net->fs_probe_list */ struct hlist_node addr4_link; /* Link in net->fs_addresses4 */ struct hlist_node addr6_link; /* Link in net->fs_addresses6 */ struct hlist_node proc_link; /* Link in net->fs_proc */ struct afs_server *gc_next; /* Next server in manager's list */ - time64_t put_time; /* Time at which last put */ - time64_t update_at; /* Time at which to next update the record */ + time64_t unuse_time; /* Time at which last unused */ unsigned long flags; -#define AFS_SERVER_FL_NOT_READY 1 /* The record is not ready for use */ -#define AFS_SERVER_FL_NOT_FOUND 2 /* VL server says no such server */ -#define AFS_SERVER_FL_VL_FAIL 3 /* Failed to access VL server */ -#define AFS_SERVER_FL_UPDATING 4 -#define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */ -#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */ -#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */ +#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */ +#define AFS_SERVER_FL_UPDATING 1 +#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */ +#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */ +#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */ +#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */ #define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */ -#define AFS_SERVER_FL_IS_YFS 9 /* Server is YFS not AFS */ -#define AFS_SERVER_FL_NO_RM2 10 /* Fileserver doesn't support YFS.RemoveFile2 */ -#define AFS_SERVER_FL_HAVE_EPOCH 11 /* ->epoch is valid */ - atomic_t usage; +#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */ +#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */ +#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */ + atomic_t ref; /* Object refcount */ + atomic_t active; /* Active user count */ u32 addr_version; /* Address list version */ - u32 cm_epoch; /* Server RxRPC epoch */ + unsigned int rtt; /* Server's current RTT in uS */ unsigned int debug_id; /* Debugging ID for traces */ /* file service access */ rwlock_t fs_lock; /* access lock */ /* callback promise management */ - struct hlist_head cb_volumes; /* List of volume interests on this server */ unsigned cb_s_break; /* Break-everything counter. 
*/ - rwlock_t cb_break_lock; /* Volume finding lock */ /* Probe state */ + unsigned long probed_at; /* Time last probe was dispatched (jiffies) */ wait_queue_head_t probe_wq; atomic_t probe_outstanding; spinlock_t probe_lock; struct { - unsigned int rtt; /* RTT as ktime/64 */ + unsigned int rtt; /* RTT in uS */ u32 abort_code; - u32 cm_epoch; short error; bool responded:1; bool is_yfs:1; bool not_yfs:1; bool local_failure:1; - bool cm_probed:1; - bool said_rebooted:1; - bool said_inconsistent:1; } probe; }; /* - * Volume collation in the server's callback interest list. - */ -struct afs_vol_interest { - struct hlist_node srv_link; /* Link in server->cb_volumes */ - struct hlist_head cb_interests; /* List of callback interests on the server */ - union { - struct rcu_head rcu; - afs_volid_t vid; /* Volume ID to match */ - }; - unsigned int usage; -}; - -/* - * Interest by a superblock on a server. - */ -struct afs_cb_interest { - struct hlist_node cb_vlink; /* Link in vol_interest->cb_interests */ - struct afs_vol_interest *vol_interest; - struct afs_server *server; /* Server on which this interest resides */ - struct super_block *sb; /* Superblock on which inodes reside */ - union { - struct rcu_head rcu; - afs_volid_t vid; /* Volume ID to match */ - }; - refcount_t usage; -}; - -/* - * Replaceable server list. + * Replaceable volume server list. */ struct afs_server_entry { struct afs_server *server; - struct afs_cb_interest *cb_interest; }; struct afs_server_list { + afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */ refcount_t usage; unsigned char nr_servers; unsigned char preferred; /* Preferred server */ @@ -593,11 +556,16 @@ struct afs_server_list { * Live AFS volume management. */ struct afs_volume { - afs_volid_t vid; /* volume ID */ + union { + struct rcu_head rcu; + afs_volid_t vid; /* volume ID */ + }; atomic_t usage; time64_t update_at; /* Time at which to next update */ struct afs_cell *cell; /* Cell to which belongs (pins ref) */ - struct list_head proc_link; /* Link in cell->vl_proc */ + struct rb_node cell_node; /* Link in cell->volumes */ + struct hlist_node proc_link; /* Link in cell->proc_volumes */ + struct super_block __rcu *sb; /* Superblock on which inodes reside */ unsigned long flags; #define AFS_VOLUME_NEEDS_UPDATE 0 /* - T if an update needs performing */ #define AFS_VOLUME_UPDATING 1 /* - T if an update is in progress */ @@ -605,10 +573,11 @@ struct afs_volume { #define AFS_VOLUME_DELETED 3 /* - T if volume appears deleted */ #define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */ #define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */ +#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */ #ifdef CONFIG_AFS_FSCACHE struct fscache_cookie *cache; /* caching cookie */ #endif - struct afs_server_list *servers; /* List of servers on which volume resides */ + struct afs_server_list __rcu *servers; /* List of servers on which volume resides */ rwlock_t servers_lock; /* Lock for ->servers */ unsigned int servers_seq; /* Incremented each time ->servers changes */ @@ -616,7 +585,6 @@ struct afs_volume { rwlock_t cb_v_break_lock; afs_voltype_t type; /* type of volume */ - short error; char type_force; /* force volume type (suppress R/O -> R/W) */ u8 name_len; u8 name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */ @@ -677,11 +645,11 @@ struct afs_vnode { afs_lock_type_t lock_type : 8; /* outstanding callback notification on this file */ - struct afs_cb_interest __rcu *cb_interest; /* Server on which this resides */ + 
void *cb_server; /* Server with callback/filelock */ unsigned int cb_s_break; /* Mass break counter on ->server */ unsigned int cb_v_break; /* Mass break counter on ->volume */ unsigned int cb_break; /* Break counter on vnode */ - seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */ + seqlock_t cb_lock; /* Lock for ->cb_server, ->status, ->cb_*break */ time64_t cb_expires_at; /* time at which callback expires */ }; @@ -758,29 +726,118 @@ struct afs_vl_cursor { }; /* - * Cursor for iterating over a set of fileservers. + * Fileserver operation methods. + */ +struct afs_operation_ops { + void (*issue_afs_rpc)(struct afs_operation *op); + void (*issue_yfs_rpc)(struct afs_operation *op); + void (*success)(struct afs_operation *op); + void (*aborted)(struct afs_operation *op); + void (*edit_dir)(struct afs_operation *op); + void (*put)(struct afs_operation *op); +}; + +struct afs_vnode_param { + struct afs_vnode *vnode; + struct afs_fid fid; /* Fid to access */ + struct afs_status_cb scb; /* Returned status and callback promise */ + afs_dataversion_t dv_before; /* Data version before the call */ + unsigned int cb_break_before; /* cb_break + cb_s_break before the call */ + u8 dv_delta; /* Expected change in data version */ + bool put_vnode; /* T if we have a ref on the vnode */ + bool need_io_lock; /* T if we need the I/O lock on this */ +}; + +/* + * Fileserver operation wrapper, handling server and address rotation + * asynchronously. May make simultaneous calls to multiple servers. */ -struct afs_fs_cursor { +struct afs_operation { + struct afs_net *net; /* Network namespace */ + struct key *key; /* Key for the cell */ const struct afs_call_type *type; /* Type of call done */ + const struct afs_operation_ops *ops; + + /* Parameters/results for the operation */ + struct afs_volume *volume; /* Volume being accessed */ + struct afs_vnode_param file[2]; + struct afs_vnode_param *more_files; + struct afs_volsync volsync; + struct dentry *dentry; /* Dentry to be altered */ + struct dentry *dentry_2; /* Second dentry to be altered */ + struct timespec64 mtime; /* Modification time to record */ + short nr_files; /* Number of entries in file[], more_files */ + short error; + unsigned int abort_code; + unsigned int debug_id; + + unsigned int cb_v_break; /* Volume break counter before op */ + unsigned int cb_s_break; /* Server break counter before op */ + + union { + struct { + int which; /* Which ->file[] to fetch for */ + } fetch_status; + struct { + int reason; /* enum afs_edit_dir_reason */ + mode_t mode; + const char *symlink; + } create; + struct { + bool need_rehash; + } unlink; + struct { + struct dentry *rehash; + struct dentry *tmp; + bool new_negative; + } rename; + struct { + struct afs_read *req; + } fetch; + struct { + struct afs_vnode *lvnode; /* vnode being locked */ + afs_lock_type_t type; + } lock; + struct { + struct address_space *mapping; /* Pages being written from */ + pgoff_t first; /* first page in mapping to deal with */ + pgoff_t last; /* last page in mapping to deal with */ + unsigned first_offset; /* offset into mapping[first] */ + unsigned last_to; /* amount of mapping[last] */ + } store; + struct { + struct iattr *attr; + } setattr; + struct afs_acl *acl; + struct yfs_acl *yacl; + struct { + struct afs_volume_status vs; + struct kstatfs *buf; + } volstatus; + }; + + /* Fileserver iteration state */ struct afs_addr_cursor ac; - struct afs_vnode *vnode; struct afs_server_list *server_list; /* Current server list (pins ref) */ - struct afs_cb_interest *cbi; 
/* Server on which this resides (pins ref) */ - struct key *key; /* Key for the server */ + struct afs_server *server; /* Server we're using (ref pinned by server_list) */ + struct afs_call *call; unsigned long untried; /* Bitmask of untried servers */ - unsigned int cb_break; /* cb_break + cb_s_break before the call */ - unsigned int cb_break_2; /* cb_break + cb_s_break (2nd vnode) */ short index; /* Current server */ - short error; - unsigned short flags; -#define AFS_FS_CURSOR_STOP 0x0001 /* Set to cease iteration */ -#define AFS_FS_CURSOR_VBUSY 0x0002 /* Set if seen VBUSY */ -#define AFS_FS_CURSOR_VMOVED 0x0004 /* Set if seen VMOVED */ -#define AFS_FS_CURSOR_VNOVOL 0x0008 /* Set if seen VNOVOL */ -#define AFS_FS_CURSOR_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */ -#define AFS_FS_CURSOR_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */ -#define AFS_FS_CURSOR_INTR 0x0040 /* Set if op is interruptible */ unsigned short nr_iterations; /* Number of server iterations */ + + unsigned int flags; +#define AFS_OPERATION_STOP 0x0001 /* Set to cease iteration */ +#define AFS_OPERATION_VBUSY 0x0002 /* Set if seen VBUSY */ +#define AFS_OPERATION_VMOVED 0x0004 /* Set if seen VMOVED */ +#define AFS_OPERATION_VNOVOL 0x0008 /* Set if seen VNOVOL */ +#define AFS_OPERATION_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */ +#define AFS_OPERATION_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */ +#define AFS_OPERATION_UNINTR 0x0040 /* Set if op is uninterruptible */ +#define AFS_OPERATION_DOWNGRADE 0x0080 /* Set to retry with downgraded opcode */ +#define AFS_OPERATION_LOCK_0 0x0100 /* Set if have io_lock on file[0] */ +#define AFS_OPERATION_LOCK_1 0x0200 /* Set if have io_lock on file[1] */ +#define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */ +#define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */ }; /* @@ -838,29 +895,15 @@ extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason); extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason); extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break *); -extern int afs_register_server_cb_interest(struct afs_vnode *, - struct afs_server_list *, unsigned int); -extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *); -extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *); - -static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi) -{ - if (cbi) - refcount_inc(&cbi->usage); - return cbi; -} - static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode) { return vnode->cb_break + vnode->cb_v_break; } static inline bool afs_cb_is_broken(unsigned int cb_break, - const struct afs_vnode *vnode, - const struct afs_cb_interest *cbi) + const struct afs_vnode *vnode) { - return !cbi || cb_break != (vnode->cb_break + - vnode->volume->cb_v_break); + return cb_break != (vnode->cb_break + vnode->volume->cb_v_break); } /* @@ -952,71 +995,81 @@ extern int afs_flock(struct file *, int, struct file_lock *); /* * fsclient.c */ -extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *, - struct afs_volsync *); -extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *); -extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *); -extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t, - 
struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *); -extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, - struct afs_status_cb *); -extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, - struct afs_status_cb *, struct afs_status_cb *); -extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, - struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *); -extern int afs_fs_rename(struct afs_fs_cursor *, const char *, - struct afs_vnode *, const char *, - struct afs_status_cb *, struct afs_status_cb *); -extern int afs_fs_store_data(struct afs_fs_cursor *, struct address_space *, - pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *); -extern int afs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *); -extern int afs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *); -extern int afs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *); -extern int afs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *); -extern int afs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *); +extern void afs_fs_fetch_status(struct afs_operation *); +extern void afs_fs_fetch_data(struct afs_operation *); +extern void afs_fs_create_file(struct afs_operation *); +extern void afs_fs_make_dir(struct afs_operation *); +extern void afs_fs_remove_file(struct afs_operation *); +extern void afs_fs_remove_dir(struct afs_operation *); +extern void afs_fs_link(struct afs_operation *); +extern void afs_fs_symlink(struct afs_operation *); +extern void afs_fs_rename(struct afs_operation *); +extern void afs_fs_store_data(struct afs_operation *); +extern void afs_fs_setattr(struct afs_operation *); +extern void afs_fs_get_volume_status(struct afs_operation *); +extern void afs_fs_set_lock(struct afs_operation *); +extern void afs_fs_extend_lock(struct afs_operation *); +extern void afs_fs_release_lock(struct afs_operation *); extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *, struct afs_addr_cursor *, struct key *); -extern struct afs_call *afs_fs_get_capabilities(struct afs_net *, struct afs_server *, - struct afs_addr_cursor *, struct key *, - unsigned int); -extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *, - struct afs_fid *, struct afs_status_cb *, - unsigned int, struct afs_volsync *); -extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *, - struct afs_fid *, struct afs_status_cb *, - struct afs_volsync *); +extern bool afs_fs_get_capabilities(struct afs_net *, struct afs_server *, + struct afs_addr_cursor *, struct key *); +extern void afs_fs_inline_bulk_status(struct afs_operation *); struct afs_acl { u32 size; u8 data[]; }; -extern struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *, struct afs_status_cb *); -extern int afs_fs_store_acl(struct afs_fs_cursor *, const struct afs_acl *, - struct afs_status_cb *); +extern void afs_fs_fetch_acl(struct afs_operation *); +extern void afs_fs_store_acl(struct afs_operation *); + +/* + * fs_operation.c + */ +extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *); +extern int afs_put_operation(struct afs_operation *); +extern bool afs_begin_vnode_operation(struct afs_operation *); +extern void afs_wait_for_operation(struct afs_operation *); +extern int afs_do_sync_operation(struct afs_operation *); + +static inline void afs_op_nomem(struct afs_operation *op) +{ + 
op->error = -ENOMEM; +} + +static inline void afs_op_set_vnode(struct afs_operation *op, unsigned int n, + struct afs_vnode *vnode) +{ + op->file[n].vnode = vnode; + op->file[n].need_io_lock = true; +} + +static inline void afs_op_set_fid(struct afs_operation *op, unsigned int n, + const struct afs_fid *fid) +{ + op->file[n].fid = *fid; +} /* * fs_probe.c */ extern void afs_fileserver_probe_result(struct afs_call *); -extern int afs_probe_fileservers(struct afs_net *, struct key *, struct afs_server_list *); +extern void afs_fs_probe_fileserver(struct afs_net *, struct afs_server *, struct key *, bool); extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long); +extern void afs_probe_fileserver(struct afs_net *, struct afs_server *); +extern void afs_fs_probe_dispatcher(struct work_struct *); +extern int afs_wait_for_one_fs_probe(struct afs_server *, bool); /* * inode.c */ -extern void afs_vnode_commit_status(struct afs_fs_cursor *, - struct afs_vnode *, - unsigned int, - const afs_dataversion_t *, - struct afs_status_cb *); +extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *); extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *); -extern int afs_iget5_test(struct inode *, void *); +extern int afs_ilookup5_test_by_fid(struct inode *, void *); extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool); -extern struct inode *afs_iget(struct super_block *, struct key *, - struct afs_iget_data *, struct afs_status_cb *, - struct afs_cb_interest *, - struct afs_vnode *); +extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *); +extern struct inode *afs_root_iget(struct super_block *, struct key *); extern void afs_zap_data(struct afs_vnode *); extern bool afs_check_validity(struct afs_vnode *); extern int afs_validate(struct afs_vnode *, struct key *); @@ -1104,11 +1157,8 @@ static inline void afs_put_sysnames(struct afs_sysnames *sysnames) {} /* * rotate.c */ -extern bool afs_begin_vnode_operation(struct afs_fs_cursor *, struct afs_vnode *, - struct key *, bool); -extern bool afs_select_fileserver(struct afs_fs_cursor *); -extern bool afs_select_current_fileserver(struct afs_fs_cursor *); -extern int afs_end_vnode_operation(struct afs_fs_cursor *); +extern bool afs_select_fileserver(struct afs_operation *); +extern void afs_dump_edestaddrreq(const struct afs_operation *); /* * rxrpc.c @@ -1128,12 +1178,17 @@ extern void afs_flat_call_destructor(struct afs_call *); extern void afs_send_empty_reply(struct afs_call *); extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); extern int afs_extract_data(struct afs_call *, bool); -extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause); +extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause); -static inline void afs_set_fc_call(struct afs_call *call, struct afs_fs_cursor *fc) +static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call, + gfp_t gfp) { - call->intr = fc->flags & AFS_FS_CURSOR_INTR; - fc->type = call->type; + op->call = call; + op->type = call->type; + call->op = op; + call->key = op->key; + call->intr = !(op->flags & AFS_OPERATION_UNINTR); + afs_make_call(&op->ac, call, gfp); } static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size) @@ -1241,13 +1296,33 @@ extern spinlock_t afs_server_peer_lock; extern struct afs_server *afs_find_server(struct afs_net *, const struct sockaddr_rxrpc *); extern struct afs_server 
*afs_find_server_by_uuid(struct afs_net *, const uuid_t *); -extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *); +extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32); extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace); +extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace); +extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace); +extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace); extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace); extern void afs_manage_servers(struct work_struct *); extern void afs_servers_timer(struct timer_list *); +extern void afs_fs_probe_timer(struct timer_list *); extern void __net_exit afs_purge_servers(struct afs_net *); -extern bool afs_check_server_record(struct afs_fs_cursor *, struct afs_server *); +extern bool afs_check_server_record(struct afs_operation *, struct afs_server *); + +static inline void afs_inc_servers_outstanding(struct afs_net *net) +{ + atomic_inc(&net->servers_outstanding); +} + +static inline void afs_dec_servers_outstanding(struct afs_net *net) +{ + if (atomic_dec_and_test(&net->servers_outstanding)) + wake_up_var(&net->servers_outstanding); +} + +static inline bool afs_is_probing_server(struct afs_server *server) +{ + return list_empty(&server->probe_link); +} /* * server_list.c @@ -1279,6 +1354,12 @@ extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uu extern struct afs_call *afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *, struct afs_vlserver *, unsigned int); extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *); +extern char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *); + +/* + * vl_alias.c + */ +extern int afs_cell_detect_alias(struct afs_cell *, struct key *); /* * vl_probe.c @@ -1322,18 +1403,12 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *, /* * volume.c */ -static inline struct afs_volume *__afs_get_volume(struct afs_volume *volume) -{ - if (volume) - atomic_inc(&volume->usage); - return volume; -} - extern struct afs_volume *afs_create_volume(struct afs_fs_context *); extern void afs_activate_volume(struct afs_volume *); extern void afs_deactivate_volume(struct afs_volume *); -extern void afs_put_volume(struct afs_cell *, struct afs_volume *); -extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *); +extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace); +extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace); +extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *); /* * write.c @@ -1362,36 +1437,24 @@ extern ssize_t afs_listxattr(struct dentry *, char *, size_t); /* * yfsclient.c */ -extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *, - struct afs_volsync *); -extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *); -extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *, - struct afs_fid *, struct afs_status_cb *); -extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *, - struct afs_fid *, struct afs_status_cb *); -extern int 
yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *, - struct afs_status_cb *, struct afs_status_cb *); -extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, - struct afs_status_cb *); -extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, - struct afs_status_cb *, struct afs_status_cb *); -extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, - struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *); -extern int yfs_fs_rename(struct afs_fs_cursor *, const char *, struct afs_vnode *, const char *, - struct afs_status_cb *, struct afs_status_cb *); -extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *, - pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *); -extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *); -extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *); -extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *); -extern int yfs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *); -extern int yfs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *); -extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *, - struct afs_fid *, struct afs_status_cb *, - struct afs_volsync *); -extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *, - struct afs_fid *, struct afs_status_cb *, - unsigned int, struct afs_volsync *); +extern void yfs_fs_fetch_file_status(struct afs_operation *); +extern void yfs_fs_fetch_data(struct afs_operation *); +extern void yfs_fs_create_file(struct afs_operation *); +extern void yfs_fs_make_dir(struct afs_operation *); +extern void yfs_fs_remove_file2(struct afs_operation *); +extern void yfs_fs_remove_file(struct afs_operation *); +extern void yfs_fs_remove_dir(struct afs_operation *); +extern void yfs_fs_link(struct afs_operation *); +extern void yfs_fs_symlink(struct afs_operation *); +extern void yfs_fs_rename(struct afs_operation *); +extern void yfs_fs_store_data(struct afs_operation *); +extern void yfs_fs_setattr(struct afs_operation *); +extern void yfs_fs_get_volume_status(struct afs_operation *); +extern void yfs_fs_set_lock(struct afs_operation *); +extern void yfs_fs_extend_lock(struct afs_operation *); +extern void yfs_fs_release_lock(struct afs_operation *); +extern void yfs_fs_fetch_status(struct afs_operation *); +extern void yfs_fs_inline_bulk_status(struct afs_operation *); struct yfs_acl { struct afs_acl *acl; /* Dir/file/symlink ACL */ @@ -1404,10 +1467,8 @@ struct yfs_acl { }; extern void yfs_free_opaque_acl(struct yfs_acl *); -extern struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *, struct yfs_acl *, - struct afs_status_cb *); -extern int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *, const struct afs_acl *, - struct afs_status_cb *); +extern void yfs_fs_fetch_opaque_acl(struct afs_operation *); +extern void yfs_fs_store_opaque_acl2(struct afs_operation *); /* * Miscellaneous inline functions. 
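The prototypes above capture the shape of the conversion: each fileserver RPC becomes a void function taking an afs_operation, the AFS and YFS variants are paired up through an afs_operation_ops table, and callers no longer loop over servers themselves. Below is a minimal sketch of the calling pattern this implies, assuming only the declarations added in this patch; example_fetch_status() is a made-up wrapper name, error handling is trimmed to the minimum, and op->ops points at the same afs_fetch_status_operation table that afs_root_iget() uses earlier in this diff. Treat it as illustrative, not as code from the patch.

/* Illustrative sketch only -- not part of the patch. */
static int example_fetch_status(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, vnode->volume);	/* allocate the op for this volume/key */
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);			/* file[0] is the vnode being operated on */
	op->ops = &afs_fetch_status_operation;		/* table used by afs_root_iget() above */

	/* Select a server, issue the RPC, wait, commit status, put the op. */
	return afs_do_sync_operation(op);
}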
@@ -1422,15 +1483,29 @@ static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) return &vnode->vfs_inode; } -static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc, +static inline void afs_check_for_remote_deletion(struct afs_operation *op, struct afs_vnode *vnode) { - if (fc->ac.error == -ENOENT) { + if (op->error == -ENOENT) { set_bit(AFS_VNODE_DELETED, &vnode->flags); afs_break_callback(vnode, afs_cb_break_for_deleted); } } +/* + * Note that a dentry got changed. We need to set d_fsdata to the data version + * number derived from the result of the operation. It doesn't matter if + * d_fsdata goes backwards as we'll just revalidate. + */ +static inline void afs_update_dentry_version(struct afs_operation *op, + struct afs_vnode_param *dir_vp, + struct dentry *dentry) +{ + if (!op->error) + dentry->d_fsdata = + (void *)(unsigned long)dir_vp->scb.status.data_version; +} + static inline int afs_io_error(struct afs_call *call, enum afs_io_error where) { trace_afs_io_error(call->debug_id, -EIO, where); diff --git a/fs/afs/main.c b/fs/afs/main.c index c9c45d7078bd..9c79c91e8005 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -82,12 +82,14 @@ static int __net_init afs_net_init(struct net *net_ns) INIT_WORK(&net->cells_manager, afs_manage_cells); timer_setup(&net->cells_timer, afs_cells_timer, 0); + mutex_init(&net->cells_alias_lock); mutex_init(&net->proc_cells_lock); INIT_HLIST_HEAD(&net->proc_cells); seqlock_init(&net->fs_lock); net->fs_servers = RB_ROOT; - INIT_LIST_HEAD(&net->fs_updates); + INIT_LIST_HEAD(&net->fs_probe_fast); + INIT_LIST_HEAD(&net->fs_probe_slow); INIT_HLIST_HEAD(&net->fs_proc); INIT_HLIST_HEAD(&net->fs_addresses4); @@ -96,6 +98,8 @@ static int __net_init afs_net_init(struct net *net_ns) INIT_WORK(&net->fs_manager, afs_manage_servers); timer_setup(&net->fs_timer, afs_servers_timer, 0); + INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher); + timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0); ret = -ENOMEM; sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 468e1713bce1..22d00cf1913d 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v) if (v == SEQ_START_TOKEN) { /* display header on line 1 */ - seq_puts(m, "USE TTL SV NAME\n"); + seq_puts(m, "USE TTL SV ST NAME\n"); return 0; } @@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v) vllist = rcu_dereference(cell->vl_servers); /* display one cell per line on subsequent lines */ - seq_printf(m, "%3u %6lld %2u %s\n", + seq_printf(m, "%3u %6lld %2u %2u %s\n", atomic_read(&cell->usage), cell->dns_expiry - ktime_get_real_seconds(), vllist->nr_servers, + cell->state, cell->name); return 0; } @@ -208,11 +209,10 @@ static const char afs_vol_types[3][3] = { */ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) { - struct afs_cell *cell = PDE_DATA(file_inode(m->file)); - struct afs_volume *vol = list_entry(v, struct afs_volume, proc_link); + struct afs_volume *vol = hlist_entry(v, struct afs_volume, proc_link); /* Display header on line 1 */ - if (v == &cell->proc_volumes) { + if (v == SEQ_START_TOKEN) { seq_puts(m, "USE VID TY NAME\n"); return 0; } @@ -230,8 +230,8 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos) { struct afs_cell *cell = PDE_DATA(file_inode(m->file)); - read_lock(&cell->proc_lock); - return seq_list_start_head(&cell->proc_volumes, *_pos); + rcu_read_lock(); + return 
seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos); } static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v, @@ -239,15 +239,13 @@ static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v, { struct afs_cell *cell = PDE_DATA(file_inode(m->file)); - return seq_list_next(v, &cell->proc_volumes, _pos); + return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos); } static void afs_proc_cell_volumes_stop(struct seq_file *m, void *v) __releases(cell->proc_lock) { - struct afs_cell *cell = PDE_DATA(file_inode(m->file)); - - read_unlock(&cell->proc_lock); + rcu_read_unlock(); } static const struct seq_operations afs_proc_cell_volumes_ops = { @@ -378,20 +376,26 @@ static int afs_proc_servers_show(struct seq_file *m, void *v) int i; if (v == SEQ_START_TOKEN) { - seq_puts(m, "UUID USE ADDR\n"); + seq_puts(m, "UUID REF ACT\n"); return 0; } server = list_entry(v, struct afs_server, proc_link); alist = rcu_dereference(server->addresses); - seq_printf(m, "%pU %3d %pISpc%s\n", + seq_printf(m, "%pU %3d %3d\n", &server->uuid, - atomic_read(&server->usage), - &alist->addrs[0].transport, - alist->preferred == 0 ? "*" : ""); - for (i = 1; i < alist->nr_addrs; i++) - seq_printf(m, " %pISpc%s\n", - &alist->addrs[i].transport, + atomic_read(&server->ref), + atomic_read(&server->active)); + seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n", + server->flags, server->rtt, server->cb_s_break); + seq_printf(m, " - probe: last=%d out=%d\n", + (int)(jiffies - server->probed_at) / HZ, + atomic_read(&server->probe_outstanding)); + seq_printf(m, " - ALIST v=%u rsp=%lx f=%lx\n", + alist->version, alist->responded, alist->failed); + for (i = 0; i < alist->nr_addrs; i++) + seq_printf(m, " [%x] %pISpc%s\n", + i, &alist->addrs[i].transport, alist->preferred == i ? "*" : ""); return 0; } diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index 32be9c698348..b5bd03b1d3c7 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h @@ -8,7 +8,7 @@ #define YFS_FS_SERVICE 2500 #define YFS_CM_SERVICE 2501 -#define YFSCBMAX 1024 +#define YFSCBMAX 1024 enum YFS_CM_Operations { YFSCBProbe = 206, /* probe client */ diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 2a3305e42b14..6a0935cb822f 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -15,60 +15,32 @@ #include "afs_fs.h" /* - * Begin an operation on the fileserver. - * - * Fileserver operations are serialised on the server by vnode, so we serialise - * them here also using the io_lock. - */ -bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode, - struct key *key, bool intr) -{ - memset(fc, 0, sizeof(*fc)); - fc->vnode = vnode; - fc->key = key; - fc->ac.error = SHRT_MAX; - fc->error = -EDESTADDRREQ; - - if (intr) { - fc->flags |= AFS_FS_CURSOR_INTR; - if (mutex_lock_interruptible(&vnode->io_lock) < 0) { - fc->error = -EINTR; - fc->flags |= AFS_FS_CURSOR_STOP; - return false; - } - } else { - mutex_lock(&vnode->io_lock); - } - - if (vnode->lock_state != AFS_VNODE_LOCK_NONE) - fc->flags |= AFS_FS_CURSOR_CUR_ONLY; - return true; -} - -/* * Begin iteration through a server list, starting with the vnode's last used * server if possible, or the last recorded good server if not. 
*/ -static bool afs_start_fs_iteration(struct afs_fs_cursor *fc, +static bool afs_start_fs_iteration(struct afs_operation *op, struct afs_vnode *vnode) { - struct afs_cb_interest *cbi; + struct afs_server *server; + void *cb_server; int i; - read_lock(&vnode->volume->servers_lock); - fc->server_list = afs_get_serverlist(vnode->volume->servers); - read_unlock(&vnode->volume->servers_lock); + read_lock(&op->volume->servers_lock); + op->server_list = afs_get_serverlist( + rcu_dereference_protected(op->volume->servers, + lockdep_is_held(&op->volume->servers_lock))); + read_unlock(&op->volume->servers_lock); - fc->untried = (1UL << fc->server_list->nr_servers) - 1; - fc->index = READ_ONCE(fc->server_list->preferred); + op->untried = (1UL << op->server_list->nr_servers) - 1; + op->index = READ_ONCE(op->server_list->preferred); - cbi = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->io_lock)); - if (cbi) { + cb_server = vnode->cb_server; + if (cb_server) { /* See if the vnode's preferred record is still available */ - for (i = 0; i < fc->server_list->nr_servers; i++) { - if (fc->server_list->servers[i].cb_interest == cbi) { - fc->index = i; + for (i = 0; i < op->server_list->nr_servers; i++) { + server = op->server_list->servers[i].server; + if (server == cb_server) { + op->index = i; goto found_interest; } } @@ -77,21 +49,18 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc, * serving this vnode, then we can't switch to another server * and have to return an error. */ - if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) { - fc->error = -ESTALE; + if (op->flags & AFS_OPERATION_CUR_ONLY) { + op->error = -ESTALE; return false; } /* Note that the callback promise is effectively broken */ write_seqlock(&vnode->cb_lock); - ASSERTCMP(cbi, ==, rcu_access_pointer(vnode->cb_interest)); - rcu_assign_pointer(vnode->cb_interest, NULL); + ASSERTCMP(cb_server, ==, vnode->cb_server); + vnode->cb_server = NULL; if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) vnode->cb_break++; write_sequnlock(&vnode->cb_lock); - - afs_put_cb_interest(afs_v2net(vnode), cbi); - cbi = NULL; } found_interest: @@ -118,12 +87,12 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code) /* * Sleep and retry the operation to the same fileserver. */ -static bool afs_sleep_and_retry(struct afs_fs_cursor *fc) +static bool afs_sleep_and_retry(struct afs_operation *op) { - if (fc->flags & AFS_FS_CURSOR_INTR) { + if (!(op->flags & AFS_OPERATION_UNINTR)) { msleep_interruptible(1000); if (signal_pending(current)) { - fc->error = -ERESTARTSYS; + op->error = -ERESTARTSYS; return false; } } else { @@ -137,26 +106,26 @@ static bool afs_sleep_and_retry(struct afs_fs_cursor *fc) * Select the fileserver to use. May be called multiple times to rotate * through the fileservers. 
*/ -bool afs_select_fileserver(struct afs_fs_cursor *fc) +bool afs_select_fileserver(struct afs_operation *op) { struct afs_addr_list *alist; struct afs_server *server; - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode *vnode = op->file[0].vnode; struct afs_error e; u32 rtt; - int error = fc->ac.error, i; + int error = op->ac.error, i; _enter("%lx[%d],%lx[%d],%d,%d", - fc->untried, fc->index, - fc->ac.tried, fc->ac.index, - error, fc->ac.abort_code); + op->untried, op->index, + op->ac.tried, op->ac.index, + error, op->ac.abort_code); - if (fc->flags & AFS_FS_CURSOR_STOP) { + if (op->flags & AFS_OPERATION_STOP) { _leave(" = f [stopped]"); return false; } - fc->nr_iterations++; + op->nr_iterations++; /* Evaluate the result of the previous operation, if there was one. */ switch (error) { @@ -166,8 +135,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) case 0: default: /* Success or local failure. Stop. */ - fc->error = error; - fc->flags |= AFS_FS_CURSOR_STOP; + op->error = error; + op->flags |= AFS_OPERATION_STOP; _leave(" = f [okay/local %d]", error); return false; @@ -175,42 +144,42 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) /* The far side rejected the operation on some grounds. This * might involve the server being busy or the volume having been moved. */ - switch (fc->ac.abort_code) { + switch (op->ac.abort_code) { case VNOVOL: /* This fileserver doesn't know about the volume. * - May indicate that the VL is wrong - retry once and compare * the results. * - May indicate that the fileserver couldn't attach to the vol. */ - if (fc->flags & AFS_FS_CURSOR_VNOVOL) { - fc->error = -EREMOTEIO; + if (op->flags & AFS_OPERATION_VNOVOL) { + op->error = -EREMOTEIO; goto next_server; } - write_lock(&vnode->volume->servers_lock); - fc->server_list->vnovol_mask |= 1 << fc->index; - write_unlock(&vnode->volume->servers_lock); + write_lock(&op->volume->servers_lock); + op->server_list->vnovol_mask |= 1 << op->index; + write_unlock(&op->volume->servers_lock); - set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - error = afs_check_volume_status(vnode->volume, fc); + set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags); + error = afs_check_volume_status(op->volume, op); if (error < 0) goto failed_set_error; - if (test_bit(AFS_VOLUME_DELETED, &vnode->volume->flags)) { - fc->error = -ENOMEDIUM; + if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) { + op->error = -ENOMEDIUM; goto failed; } /* If the server list didn't change, then assume that * it's the fileserver having trouble. 
*/ - if (vnode->volume->servers == fc->server_list) { - fc->error = -EREMOTEIO; + if (rcu_access_pointer(op->volume->servers) == op->server_list) { + op->error = -EREMOTEIO; goto next_server; } /* Try again */ - fc->flags |= AFS_FS_CURSOR_VNOVOL; + op->flags |= AFS_OPERATION_VNOVOL; _leave(" = t [vnovol]"); return true; @@ -220,20 +189,20 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) case VONLINE: case VDISKFULL: case VOVERQUOTA: - fc->error = afs_abort_to_error(fc->ac.abort_code); + op->error = afs_abort_to_error(op->ac.abort_code); goto next_server; case VOFFLINE: - if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags)) { - afs_busy(vnode->volume, fc->ac.abort_code); - clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags); + if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) { + afs_busy(op->volume, op->ac.abort_code); + clear_bit(AFS_VOLUME_BUSY, &op->volume->flags); } - if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) { - fc->error = -EADV; + if (op->flags & AFS_OPERATION_NO_VSLEEP) { + op->error = -EADV; goto failed; } - if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) { - fc->error = -ESTALE; + if (op->flags & AFS_OPERATION_CUR_ONLY) { + op->error = -ESTALE; goto failed; } goto busy; @@ -244,17 +213,17 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) /* Retry after going round all the servers unless we * have a file lock we need to maintain. */ - if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) { - fc->error = -EBUSY; + if (op->flags & AFS_OPERATION_NO_VSLEEP) { + op->error = -EBUSY; goto failed; } - if (!test_and_set_bit(AFS_VOLUME_BUSY, &vnode->volume->flags)) { - afs_busy(vnode->volume, fc->ac.abort_code); - clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags); + if (!test_and_set_bit(AFS_VOLUME_BUSY, &op->volume->flags)) { + afs_busy(op->volume, op->ac.abort_code); + clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags); } busy: - if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) { - if (!afs_sleep_and_retry(fc)) + if (op->flags & AFS_OPERATION_CUR_ONLY) { + if (!afs_sleep_and_retry(op)) goto failed; /* Retry with same server & address */ @@ -262,7 +231,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) return true; } - fc->flags |= AFS_FS_CURSOR_VBUSY; + op->flags |= AFS_OPERATION_VBUSY; goto next_server; case VMOVED: @@ -273,15 +242,15 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * We also limit the number of VMOVED hops we will * honour, just in case someone sets up a loop. */ - if (fc->flags & AFS_FS_CURSOR_VMOVED) { - fc->error = -EREMOTEIO; + if (op->flags & AFS_OPERATION_VMOVED) { + op->error = -EREMOTEIO; goto failed; } - fc->flags |= AFS_FS_CURSOR_VMOVED; + op->flags |= AFS_OPERATION_VMOVED; - set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags); - set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - error = afs_check_volume_status(vnode->volume, fc); + set_bit(AFS_VOLUME_WAIT, &op->volume->flags); + set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags); + error = afs_check_volume_status(op->volume, op); if (error < 0) goto failed_set_error; @@ -294,23 +263,23 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * * TODO: Retry a few times with sleeps. 
*/ - if (vnode->volume->servers == fc->server_list) { - fc->error = -ENOMEDIUM; + if (rcu_access_pointer(op->volume->servers) == op->server_list) { + op->error = -ENOMEDIUM; goto failed; } goto restart_from_beginning; default: - clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags); - clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags); - fc->error = afs_abort_to_error(fc->ac.abort_code); + clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags); + clear_bit(AFS_VOLUME_BUSY, &op->volume->flags); + op->error = afs_abort_to_error(op->ac.abort_code); goto failed; } case -ETIMEDOUT: case -ETIME: - if (fc->error != -EDESTADDRREQ) + if (op->error != -EDESTADDRREQ) goto iterate_address; /* Fall through */ case -ERFKILL: @@ -320,103 +289,94 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) case -EHOSTDOWN: case -ECONNREFUSED: _debug("no conn"); - fc->error = error; + op->error = error; goto iterate_address; case -ECONNRESET: _debug("call reset"); - fc->error = error; + op->error = error; goto failed; } restart_from_beginning: _debug("restart"); - afs_end_cursor(&fc->ac); - afs_put_cb_interest(afs_v2net(vnode), fc->cbi); - fc->cbi = NULL; - afs_put_serverlist(afs_v2net(vnode), fc->server_list); - fc->server_list = NULL; + afs_end_cursor(&op->ac); + op->server = NULL; + afs_put_serverlist(op->net, op->server_list); + op->server_list = NULL; start: _debug("start"); /* See if we need to do an update of the volume record. Note that the * volume may have moved or even have been deleted. */ - error = afs_check_volume_status(vnode->volume, fc); + error = afs_check_volume_status(op->volume, op); if (error < 0) goto failed_set_error; - if (!afs_start_fs_iteration(fc, vnode)) + if (!afs_start_fs_iteration(op, vnode)) goto failed; - _debug("__ VOL %llx __", vnode->volume->vid); - error = afs_probe_fileservers(afs_v2net(vnode), fc->key, fc->server_list); - if (error < 0) - goto failed_set_error; + _debug("__ VOL %llx __", op->volume->vid); pick_server: - _debug("pick [%lx]", fc->untried); + _debug("pick [%lx]", op->untried); - error = afs_wait_for_fs_probes(fc->server_list, fc->untried); + error = afs_wait_for_fs_probes(op->server_list, op->untried); if (error < 0) goto failed_set_error; /* Pick the untried server with the lowest RTT. If we have outstanding * callbacks, we stick with the server we're already using if we can. */ - if (fc->cbi) { - _debug("cbi %u", fc->index); - if (test_bit(fc->index, &fc->untried)) + if (op->server) { + _debug("server %u", op->index); + if (test_bit(op->index, &op->untried)) goto selected_server; - afs_put_cb_interest(afs_v2net(vnode), fc->cbi); - fc->cbi = NULL; - _debug("nocbi"); + op->server = NULL; + _debug("no server"); } - fc->index = -1; + op->index = -1; rtt = U32_MAX; - for (i = 0; i < fc->server_list->nr_servers; i++) { - struct afs_server *s = fc->server_list->servers[i].server; + for (i = 0; i < op->server_list->nr_servers; i++) { + struct afs_server *s = op->server_list->servers[i].server; - if (!test_bit(i, &fc->untried) || !s->probe.responded) + if (!test_bit(i, &op->untried) || + !test_bit(AFS_SERVER_FL_RESPONDING, &s->flags)) continue; if (s->probe.rtt < rtt) { - fc->index = i; + op->index = i; rtt = s->probe.rtt; } } - if (fc->index == -1) + if (op->index == -1) goto no_more_servers; selected_server: - _debug("use %d", fc->index); - __clear_bit(fc->index, &fc->untried); + _debug("use %d", op->index); + __clear_bit(op->index, &op->untried); /* We're starting on a different fileserver from the list. 
We need to * check it, create a callback intercept, find its address list and * probe its capabilities before we use it. */ - ASSERTCMP(fc->ac.alist, ==, NULL); - server = fc->server_list->servers[fc->index].server; + ASSERTCMP(op->ac.alist, ==, NULL); + server = op->server_list->servers[op->index].server; - if (!afs_check_server_record(fc, server)) + if (!afs_check_server_record(op, server)) goto failed; _debug("USING SERVER: %pU", &server->uuid); - /* Make sure we've got a callback interest record for this server. We - * have to link it in before we send the request as we can be sent a - * break request before we've finished decoding the reply and - * installing the vnode. - */ - error = afs_register_server_cb_interest(vnode, fc->server_list, - fc->index); - if (error < 0) - goto failed_set_error; - - fc->cbi = afs_get_cb_interest( - rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->io_lock))); + op->flags |= AFS_OPERATION_RETRY_SERVER; + op->server = server; + if (vnode->cb_server != server) { + vnode->cb_server = server; + vnode->cb_s_break = server->cb_s_break; + vnode->cb_v_break = vnode->volume->cb_v_break; + clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + } read_lock(&server->fs_lock); alist = rcu_dereference_protected(server->addresses, @@ -424,44 +384,68 @@ selected_server: afs_get_addrlist(alist); read_unlock(&server->fs_lock); - memset(&fc->ac, 0, sizeof(fc->ac)); +retry_server: + memset(&op->ac, 0, sizeof(op->ac)); - if (!fc->ac.alist) - fc->ac.alist = alist; + if (!op->ac.alist) + op->ac.alist = alist; else afs_put_addrlist(alist); - fc->ac.index = -1; + op->ac.index = -1; iterate_address: - ASSERT(fc->ac.alist); + ASSERT(op->ac.alist); /* Iterate over the current server's address list to try and find an * address on which it will respond to us. */ - if (!afs_iterate_addresses(&fc->ac)) - goto next_server; + if (!afs_iterate_addresses(&op->ac)) + goto out_of_addresses; - _debug("address [%u] %u/%u", fc->index, fc->ac.index, fc->ac.alist->nr_addrs); + _debug("address [%u] %u/%u %pISp", + op->index, op->ac.index, op->ac.alist->nr_addrs, + &op->ac.alist->addrs[op->ac.index].transport); _leave(" = t"); return true; +out_of_addresses: + /* We've now had a failure to respond on all of a server's addresses - + * immediately probe them again and consider retrying the server. + */ + afs_probe_fileserver(op->net, op->server); + if (op->flags & AFS_OPERATION_RETRY_SERVER) { + alist = op->ac.alist; + error = afs_wait_for_one_fs_probe( + op->server, !(op->flags & AFS_OPERATION_UNINTR)); + switch (error) { + case 0: + op->flags &= ~AFS_OPERATION_RETRY_SERVER; + goto retry_server; + case -ERESTARTSYS: + goto failed_set_error; + case -ETIME: + case -EDESTADDRREQ: + goto next_server; + } + } + next_server: _debug("next"); - afs_end_cursor(&fc->ac); + afs_end_cursor(&op->ac); goto pick_server; no_more_servers: /* That's all the servers poked to no good effect. Try again if some * of them were busy. 
*/ - if (fc->flags & AFS_FS_CURSOR_VBUSY) + if (op->flags & AFS_OPERATION_VBUSY) goto restart_from_beginning; e.error = -EDESTADDRREQ; e.responded = false; - for (i = 0; i < fc->server_list->nr_servers; i++) { - struct afs_server *s = fc->server_list->servers[i].server; + for (i = 0; i < op->server_list->nr_servers; i++) { + struct afs_server *s = op->server_list->servers[i].server; afs_prioritise_error(&e, READ_ONCE(s->probe.error), s->probe.abort_code); @@ -470,101 +454,18 @@ no_more_servers: error = e.error; failed_set_error: - fc->error = error; + op->error = error; failed: - fc->flags |= AFS_FS_CURSOR_STOP; - afs_end_cursor(&fc->ac); - _leave(" = f [failed %d]", fc->error); - return false; -} - -/* - * Select the same fileserver we used for a vnode before and only that - * fileserver. We use this when we have a lock on that file, which is backed - * only by the fileserver we obtained it from. - */ -bool afs_select_current_fileserver(struct afs_fs_cursor *fc) -{ - struct afs_vnode *vnode = fc->vnode; - struct afs_cb_interest *cbi; - struct afs_addr_list *alist; - int error = fc->ac.error; - - _enter(""); - - cbi = rcu_dereference_protected(vnode->cb_interest, - lockdep_is_held(&vnode->io_lock)); - - switch (error) { - case SHRT_MAX: - if (!cbi) { - fc->error = -ESTALE; - fc->flags |= AFS_FS_CURSOR_STOP; - return false; - } - - fc->cbi = afs_get_cb_interest(cbi); - - read_lock(&cbi->server->fs_lock); - alist = rcu_dereference_protected(cbi->server->addresses, - lockdep_is_held(&cbi->server->fs_lock)); - afs_get_addrlist(alist); - read_unlock(&cbi->server->fs_lock); - if (!alist) { - fc->error = -ESTALE; - fc->flags |= AFS_FS_CURSOR_STOP; - return false; - } - - memset(&fc->ac, 0, sizeof(fc->ac)); - fc->ac.alist = alist; - fc->ac.index = -1; - goto iterate_address; - - case 0: - default: - /* Success or local failure. Stop. */ - fc->error = error; - fc->flags |= AFS_FS_CURSOR_STOP; - _leave(" = f [okay/local %d]", error); - return false; - - case -ECONNABORTED: - fc->error = afs_abort_to_error(fc->ac.abort_code); - fc->flags |= AFS_FS_CURSOR_STOP; - _leave(" = f [abort]"); - return false; - - case -ERFKILL: - case -EADDRNOTAVAIL: - case -ENETUNREACH: - case -EHOSTUNREACH: - case -EHOSTDOWN: - case -ECONNREFUSED: - case -ETIMEDOUT: - case -ETIME: - _debug("no conn"); - fc->error = error; - goto iterate_address; - } - -iterate_address: - /* Iterate over the current server's address list to try and find an - * address on which it will respond to us. - */ - if (afs_iterate_addresses(&fc->ac)) { - _leave(" = t"); - return true; - } - - afs_end_cursor(&fc->ac); + op->flags |= AFS_OPERATION_STOP; + afs_end_cursor(&op->ac); + _leave(" = f [failed %d]", op->error); return false; } /* * Dump cursor state in the case of the error being EDESTADDRREQ. 
*/ -static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc) +void afs_dump_edestaddrreq(const struct afs_operation *op) { static int count; int i; @@ -576,13 +477,14 @@ static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc) rcu_read_lock(); pr_notice("EDESTADDR occurred\n"); - pr_notice("FC: cbb=%x cbb2=%x fl=%hx err=%hd\n", - fc->cb_break, fc->cb_break_2, fc->flags, fc->error); + pr_notice("FC: cbb=%x cbb2=%x fl=%x err=%hd\n", + op->file[0].cb_break_before, + op->file[1].cb_break_before, op->flags, op->error); pr_notice("FC: ut=%lx ix=%d ni=%u\n", - fc->untried, fc->index, fc->nr_iterations); + op->untried, op->index, op->nr_iterations); - if (fc->server_list) { - const struct afs_server_list *sl = fc->server_list; + if (op->server_list) { + const struct afs_server_list *sl = op->server_list; pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n", sl->nr_servers, sl->preferred, sl->vnovol_mask); for (i = 0; i < sl->nr_servers; i++) { @@ -596,41 +498,16 @@ static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc) a->version, a->nr_ipv4, a->nr_addrs, a->max_addrs, a->preferred); - pr_notice("FC: - pr=%lx R=%lx F=%lx\n", - a->probed, a->responded, a->failed); - if (a == fc->ac.alist) + pr_notice("FC: - R=%lx F=%lx\n", + a->responded, a->failed); + if (a == op->ac.alist) pr_notice("FC: - current\n"); } } } pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n", - fc->ac.tried, fc->ac.index, fc->ac.abort_code, fc->ac.error, - fc->ac.responded, fc->ac.nr_iterations); + op->ac.tried, op->ac.index, op->ac.abort_code, op->ac.error, + op->ac.responded, op->ac.nr_iterations); rcu_read_unlock(); } - -/* - * Tidy up a filesystem cursor and unlock the vnode. - */ -int afs_end_vnode_operation(struct afs_fs_cursor *fc) -{ - struct afs_net *net = afs_v2net(fc->vnode); - - if (fc->error == -EDESTADDRREQ || - fc->error == -EADDRNOTAVAIL || - fc->error == -ENETUNREACH || - fc->error == -EHOSTUNREACH) - afs_dump_edestaddrreq(fc); - - mutex_unlock(&fc->vnode->io_lock); - - afs_end_cursor(&fc->ac); - afs_put_cb_interest(net, fc->cbi); - afs_put_serverlist(net, fc->server_list); - - if (fc->error == -ECONNABORTED) - fc->error = afs_abort_to_error(fc->ac.abort_code); - - return fc->error; -} diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 1ecc67da6c1a..8fc8fb406a5a 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -37,7 +37,6 @@ int afs_open_socket(struct afs_net *net) { struct sockaddr_rxrpc srx; struct socket *socket; - unsigned int min_level; int ret; _enter(""); @@ -57,9 +56,8 @@ int afs_open_socket(struct afs_net *net) srx.transport.sin6.sin6_family = AF_INET6; srx.transport.sin6.sin6_port = htons(AFS_CM_PORT); - min_level = RXRPC_SECURITY_ENCRYPT; - ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL, - (void *)&min_level, sizeof(min_level)); + ret = rxrpc_sock_set_min_security_level(socket->sk, + RXRPC_SECURITY_ENCRYPT); if (ret < 0) goto error_2; @@ -183,8 +181,7 @@ void afs_put_call(struct afs_call *call) if (call->type->destructor) call->type->destructor(call); - afs_put_server(call->net, call->server, afs_server_trace_put_call); - afs_put_cb_interest(call->net, call->cbi); + afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call); afs_put_addrlist(call->alist); kfree(call->request); @@ -283,18 +280,19 @@ static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, struct bio_vec *bv, pgoff_t first, pgoff_t last, unsigned offset) { + struct afs_operation *op = call->op; struct page *pages[AFS_BVEC_MAX]; unsigned int nr, n, i, to, bytes = 0; 
nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX); - n = find_get_pages_contig(call->mapping, first, nr, pages); + n = find_get_pages_contig(op->store.mapping, first, nr, pages); ASSERTCMP(n, ==, nr); msg->msg_flags |= MSG_MORE; for (i = 0; i < nr; i++) { to = PAGE_SIZE; if (first + i >= last) { - to = call->last_to; + to = op->store.last_to; msg->msg_flags &= ~MSG_MORE; } bv[i].bv_page = pages[i]; @@ -324,13 +322,14 @@ static void afs_notify_end_request_tx(struct sock *sock, */ static int afs_send_pages(struct afs_call *call, struct msghdr *msg) { + struct afs_operation *op = call->op; struct bio_vec bv[AFS_BVEC_MAX]; unsigned int bytes, nr, loop, offset; - pgoff_t first = call->first, last = call->last; + pgoff_t first = op->store.first, last = op->store.last; int ret; - offset = call->first_offset; - call->first_offset = 0; + offset = op->store.first_offset; + op->store.first_offset = 0; do { afs_load_bvec(call, msg, bv, first, last, offset); @@ -340,7 +339,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg) bytes = msg->msg_iter.count; nr = msg->msg_iter.nr_segs; - ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg, + ret = rxrpc_kernel_send_data(op->net->socket, call->rxcall, msg, bytes, afs_notify_end_request_tx); for (loop = 0; loop < nr; loop++) put_page(bv[loop].bv_page); @@ -350,7 +349,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg) first += nr; } while (first <= last); - trace_afs_sent_pages(call, call->first, last, first, ret); + trace_afs_sent_pages(call, op->store.first, last, first, ret); return ret; } @@ -385,16 +384,18 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) */ tx_total_len = call->request_size; if (call->send_pages) { - if (call->last == call->first) { - tx_total_len += call->last_to - call->first_offset; + struct afs_operation *op = call->op; + + if (op->store.last == op->store.first) { + tx_total_len += op->store.last_to - op->store.first_offset; } else { /* It looks mathematically like you should be able to * combine the following lines with the ones above, but * unsigned arithmetic is fun when it wraps... */ - tx_total_len += PAGE_SIZE - call->first_offset; - tx_total_len += call->last_to; - tx_total_len += (call->last - call->first - 1) * PAGE_SIZE; + tx_total_len += PAGE_SIZE - op->store.first_offset; + tx_total_len += op->store.last_to; + tx_total_len += (op->store.last - op->store.first - 1) * PAGE_SIZE; } } @@ -540,13 +541,15 @@ static void afs_deliver_to_call(struct afs_call *call) ret = call->type->deliver(call); state = READ_ONCE(call->state); + if (ret == 0 && call->unmarshalling_error) + ret = -EBADMSG; switch (ret) { case 0: afs_queue_call_work(call); if (state == AFS_CALL_CL_PROC_REPLY) { - if (call->cbi) + if (call->op) set_bit(AFS_SERVER_FL_MAY_HAVE_CB, - &call->cbi->server->flags); + &call->op->server->flags); goto call_complete; } ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY); @@ -959,9 +962,11 @@ int afs_extract_data(struct afs_call *call, bool want_more) /* * Log protocol error production. 
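The tx_total_len calculation above (and its comment about unsigned wraparound) is just the byte count of a dirty page range with partial first and last pages. A minimal sketch with a couple of sanity checks; PAGE_SIZE is fixed at 4096 here purely for illustration.

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/* Bytes covered by pages first..last, starting at first_offset within the
 * first page and ending at last_to within the last page. */
static size_t span_bytes(unsigned long first, unsigned long last,
			 unsigned int first_offset, unsigned int last_to)
{
	if (first == last)
		return last_to - first_offset;
	return (PAGE_SIZE - first_offset) + last_to +
	       (last - first - 1) * (size_t)PAGE_SIZE;
}

int main(void)
{
	assert(span_bytes(3, 3, 100, 200) == 100);			/* single page */
	assert(span_bytes(3, 5, 100, 200) == 3996 + 200 + 4096);	/* three pages */
	return 0;
}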
*/ -noinline int afs_protocol_error(struct afs_call *call, int error, +noinline int afs_protocol_error(struct afs_call *call, enum afs_eproto_cause cause) { - trace_afs_protocol_error(call, error, cause); - return error; + trace_afs_protocol_error(call, cause); + if (call) + call->unmarshalling_error = true; + return -EBADMSG; } diff --git a/fs/afs/security.c b/fs/afs/security.c index ce9de1e6742b..90d852704328 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -170,8 +170,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, break; } - if (afs_cb_is_broken(cb_break, vnode, - rcu_dereference(vnode->cb_interest))) { + if (afs_cb_is_broken(cb_break, vnode)) { changed = true; break; } @@ -201,7 +200,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, } } - if (afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest))) + if (afs_cb_is_broken(cb_break, vnode)) goto someone_else_changed_it; /* We need a ref on any permits list we want to copy as we'll have to @@ -281,8 +280,7 @@ found: rcu_read_lock(); spin_lock(&vnode->lock); zap = rcu_access_pointer(vnode->permit_cache); - if (!afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)) && - zap == permits) + if (!afs_cb_is_broken(cb_break, vnode) && zap == permits) rcu_assign_pointer(vnode->permit_cache, replacement); else zap = replacement; diff --git a/fs/afs/server.c b/fs/afs/server.c index 11b90ac7ea30..039e3488511c 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -12,19 +12,11 @@ #include "protocol_yfs.h" static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */ -static unsigned afs_server_update_delay = 30; /* Time till VLDB recheck in secs */ static atomic_t afs_server_debug_id; -static void afs_inc_servers_outstanding(struct afs_net *net) -{ - atomic_inc(&net->servers_outstanding); -} - -static void afs_dec_servers_outstanding(struct afs_net *net) -{ - if (atomic_dec_and_test(&net->servers_outstanding)) - wake_up_var(&net->servers_outstanding); -} +static struct afs_server *afs_maybe_use_server(struct afs_server *, + enum afs_server_trace); +static void __afs_put_server(struct afs_net *, struct afs_server *); /* * Find a server by one of its addresses. @@ -41,7 +33,7 @@ struct afs_server *afs_find_server(struct afs_net *net, do { if (server) - afs_put_server(net, server, afs_server_trace_put_find_rsq); + afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq); server = NULL; read_seqbegin_or_lock(&net->fs_addr_lock, &seq); @@ -79,9 +71,9 @@ struct afs_server *afs_find_server(struct afs_net *net, } server = NULL; + continue; found: - if (server && !atomic_inc_not_zero(&server->usage)) - server = NULL; + server = afs_maybe_use_server(server, afs_server_trace_get_by_addr); } while (need_seqretry(&net->fs_addr_lock, seq)); @@ -92,7 +84,7 @@ struct afs_server *afs_find_server(struct afs_net *net, } /* - * Look up a server by its UUID + * Look up a server by its UUID and mark it active. */ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid) { @@ -108,7 +100,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu * changes. 
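afs_find_server() above walks the address hash under a sequence lock and redoes the walk if a writer got in while it was reading. A simplified userspace seqcount reader showing the same retry discipline; the kernel's read_seqbegin_or_lock()/need_seqretry() pair additionally falls back to taking the lock outright after a failed lockless pass, which this sketch leaves out.

#include <sched.h>
#include <stdatomic.h>

/* Writers make the counter odd while they update and even again when done;
 * a reader retries if the counter changed across its read section. */
static unsigned int read_begin(const atomic_uint *seq)
{
	unsigned int s;

	while ((s = atomic_load(seq)) & 1)
		sched_yield();		/* a writer is mid-update */
	return s;
}

static int read_retry(const atomic_uint *seq, unsigned int start)
{
	return atomic_load(seq) != start;
}

A reader loops "s = read_begin(&seq); ... copy out what it needs ..." until read_retry(&seq, s) returns false.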
*/ if (server) - afs_put_server(net, server, afs_server_trace_put_uuid_rsq); + afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq); server = NULL; read_seqbegin_or_lock(&net->fs_lock, &seq); @@ -123,7 +115,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu } else if (diff > 0) { p = p->rb_right; } else { - afs_get_server(server, afs_server_trace_get_by_uuid); + afs_use_server(server, afs_server_trace_get_by_uuid); break; } @@ -138,13 +130,16 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu } /* - * Install a server record in the namespace tree + * Install a server record in the namespace tree. If there's a clash, we stick + * it into a list anchored on whichever afs_server struct is actually in the + * tree. */ -static struct afs_server *afs_install_server(struct afs_net *net, +static struct afs_server *afs_install_server(struct afs_cell *cell, struct afs_server *candidate) { const struct afs_addr_list *alist; - struct afs_server *server; + struct afs_server *server, *next; + struct afs_net *net = cell->net; struct rb_node **pp, *p; int diff; @@ -160,12 +155,30 @@ static struct afs_server *afs_install_server(struct afs_net *net, _debug("- consider %p", p); server = rb_entry(p, struct afs_server, uuid_rb); diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t)); - if (diff < 0) + if (diff < 0) { pp = &(*pp)->rb_left; - else if (diff > 0) + } else if (diff > 0) { pp = &(*pp)->rb_right; - else - goto exists; + } else { + if (server->cell == cell) + goto exists; + + /* We have the same UUID representing servers in + * different cells. Append the new server to the list. + */ + for (;;) { + next = rcu_dereference_protected( + server->uuid_next, + lockdep_is_held(&net->fs_lock.lock)); + if (!next) + break; + server = next; + } + rcu_assign_pointer(server->uuid_next, candidate); + candidate->uuid_prev = server; + server = candidate; + goto added_dup; + } } server = candidate; @@ -173,6 +186,7 @@ static struct afs_server *afs_install_server(struct afs_net *net, rb_insert_color(&server->uuid_rb, &net->fs_servers); hlist_add_head_rcu(&server->proc_link, &net->fs_proc); +added_dup: write_seqlock(&net->fs_addr_lock); alist = rcu_dereference_protected(server->addresses, lockdep_is_held(&net->fs_addr_lock.lock)); @@ -199,13 +213,14 @@ exists: } /* - * allocate a new server record + * Allocate a new server record and mark it active. 
*/ -static struct afs_server *afs_alloc_server(struct afs_net *net, +static struct afs_server *afs_alloc_server(struct afs_cell *cell, const uuid_t *uuid, struct afs_addr_list *alist) { struct afs_server *server; + struct afs_net *net = cell->net; _enter(""); @@ -213,20 +228,21 @@ static struct afs_server *afs_alloc_server(struct afs_net *net, if (!server) goto enomem; - atomic_set(&server->usage, 1); + atomic_set(&server->ref, 1); + atomic_set(&server->active, 1); server->debug_id = atomic_inc_return(&afs_server_debug_id); RCU_INIT_POINTER(server->addresses, alist); server->addr_version = alist->version; server->uuid = *uuid; - server->update_at = ktime_get_real_seconds() + afs_server_update_delay; rwlock_init(&server->fs_lock); - INIT_HLIST_HEAD(&server->cb_volumes); - rwlock_init(&server->cb_break_lock); init_waitqueue_head(&server->probe_wq); + INIT_LIST_HEAD(&server->probe_link); spin_lock_init(&server->probe_lock); + server->cell = cell; + server->rtt = UINT_MAX; afs_inc_servers_outstanding(net); - trace_afs_server(server, 1, afs_server_trace_alloc); + trace_afs_server(server, 1, 1, afs_server_trace_alloc); _leave(" = %p", server); return server; @@ -264,7 +280,7 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell, * Get or create a fileserver record. */ struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key, - const uuid_t *uuid) + const uuid_t *uuid, u32 addr_version) { struct afs_addr_list *alist; struct afs_server *server, *candidate; @@ -272,26 +288,34 @@ struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key, _enter("%p,%pU", cell->net, uuid); server = afs_find_server_by_uuid(cell->net, uuid); - if (server) + if (server) { + if (server->addr_version != addr_version) + set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags); return server; + } alist = afs_vl_lookup_addrs(cell, key, uuid); if (IS_ERR(alist)) return ERR_CAST(alist); - candidate = afs_alloc_server(cell->net, uuid, alist); + candidate = afs_alloc_server(cell, uuid, alist); if (!candidate) { afs_put_addrlist(alist); return ERR_PTR(-ENOMEM); } - server = afs_install_server(cell->net, candidate); + server = afs_install_server(cell, candidate); if (server != candidate) { afs_put_addrlist(alist); kfree(candidate); + } else { + /* Immediately dispatch an asynchronous probe to each interface + * on the fileserver. This will make sure the repeat-probing + * service is started. + */ + afs_fs_probe_fileserver(cell->net, server, key, true); } - _leave(" = %p{%d}", server, atomic_read(&server->usage)); return server; } @@ -327,9 +351,38 @@ void afs_servers_timer(struct timer_list *timer) struct afs_server *afs_get_server(struct afs_server *server, enum afs_server_trace reason) { - unsigned int u = atomic_inc_return(&server->usage); + unsigned int u = atomic_inc_return(&server->ref); + + trace_afs_server(server, u, atomic_read(&server->active), reason); + return server; +} + +/* + * Try to get a reference on a server object. + */ +static struct afs_server *afs_maybe_use_server(struct afs_server *server, + enum afs_server_trace reason) +{ + unsigned int r = atomic_fetch_add_unless(&server->ref, 1, 0); + unsigned int a; + + if (r == 0) + return NULL; + + a = atomic_inc_return(&server->active); + trace_afs_server(server, r, a, reason); + return server; +} + +/* + * Get an active count on a server object. 
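afs_maybe_use_server() above uses atomic_fetch_add_unless() so that a reference is only taken while the count is still non-zero, and an object already on its way to destruction cannot be resurrected. The equivalent compare-and-swap loop in portable C11 atomics, for illustration:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is still live (count > 0). */
static bool ref_get_unless_zero(atomic_uint *ref)
{
	unsigned int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
		/* the failed CAS reloaded 'old'; try again */
	}
	return false;
}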
+ */ +struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason) +{ + unsigned int r = atomic_inc_return(&server->ref); + unsigned int a = atomic_inc_return(&server->active); - trace_afs_server(server, u, reason); + trace_afs_server(server, r, a, reason); return server; } @@ -344,32 +397,57 @@ void afs_put_server(struct afs_net *net, struct afs_server *server, if (!server) return; - server->put_time = ktime_get_real_seconds(); - - usage = atomic_dec_return(&server->usage); + usage = atomic_dec_return(&server->ref); + trace_afs_server(server, usage, atomic_read(&server->active), reason); + if (unlikely(usage == 0)) + __afs_put_server(net, server); +} - trace_afs_server(server, usage, reason); +/* + * Drop an active count on a server object without updating the last-unused + * time. + */ +void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server, + enum afs_server_trace reason) +{ + if (server) { + unsigned int active = atomic_dec_return(&server->active); - if (likely(usage > 0)) - return; + if (active == 0) + afs_set_server_timer(net, afs_server_gc_delay); + afs_put_server(net, server, reason); + } +} - afs_set_server_timer(net, afs_server_gc_delay); +/* + * Drop an active count on a server object. + */ +void afs_unuse_server(struct afs_net *net, struct afs_server *server, + enum afs_server_trace reason) +{ + if (server) { + server->unuse_time = ktime_get_real_seconds(); + afs_unuse_server_notime(net, server, reason); + } } static void afs_server_rcu(struct rcu_head *rcu) { struct afs_server *server = container_of(rcu, struct afs_server, rcu); - trace_afs_server(server, atomic_read(&server->usage), - afs_server_trace_free); + trace_afs_server(server, atomic_read(&server->ref), + atomic_read(&server->active), afs_server_trace_free); afs_put_addrlist(rcu_access_pointer(server->addresses)); kfree(server); } -/* - * destroy a dead server - */ -static void afs_destroy_server(struct afs_net *net, struct afs_server *server) +static void __afs_put_server(struct afs_net *net, struct afs_server *server) +{ + call_rcu(&server->rcu, afs_server_rcu); + afs_dec_servers_outstanding(net); +} + +static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server) { struct afs_addr_list *alist = rcu_access_pointer(server->addresses); struct afs_addr_cursor ac = { @@ -378,19 +456,18 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) .error = 0, }; - trace_afs_server(server, atomic_read(&server->usage), - afs_server_trace_give_up_cb); + afs_fs_give_up_all_callbacks(net, server, &ac, NULL); +} +/* + * destroy a dead server + */ +static void afs_destroy_server(struct afs_net *net, struct afs_server *server) +{ if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags)) - afs_fs_give_up_all_callbacks(net, server, &ac, NULL); + afs_give_up_callbacks(net, server); - wait_var_event(&server->probe_outstanding, - atomic_read(&server->probe_outstanding) == 0); - - trace_afs_server(server, atomic_read(&server->usage), - afs_server_trace_destroy); - call_rcu(&server->rcu, afs_server_rcu); - afs_dec_servers_outstanding(net); + afs_put_server(net, server, afs_server_trace_destroy); } /* @@ -398,32 +475,49 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) */ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list) { - struct afs_server *server; - bool deleted; - int usage; + struct afs_server *server, *next, *prev; + int active; while ((server = gc_list)) { gc_list = server->gc_next; 
write_seqlock(&net->fs_lock); - usage = 1; - deleted = atomic_try_cmpxchg(&server->usage, &usage, 0); - trace_afs_server(server, usage, afs_server_trace_gc); - if (deleted) { - rb_erase(&server->uuid_rb, &net->fs_servers); - hlist_del_rcu(&server->proc_link); - } - write_sequnlock(&net->fs_lock); - if (deleted) { - write_seqlock(&net->fs_addr_lock); + active = atomic_read(&server->active); + if (active == 0) { + trace_afs_server(server, atomic_read(&server->ref), + active, afs_server_trace_gc); + next = rcu_dereference_protected( + server->uuid_next, lockdep_is_held(&net->fs_lock.lock)); + prev = server->uuid_prev; + if (!prev) { + /* The one at the front is in the tree */ + if (!next) { + rb_erase(&server->uuid_rb, &net->fs_servers); + } else { + rb_replace_node_rcu(&server->uuid_rb, + &next->uuid_rb, + &net->fs_servers); + next->uuid_prev = NULL; + } + } else { + /* This server is not at the front */ + rcu_assign_pointer(prev->uuid_next, next); + if (next) + next->uuid_prev = prev; + } + + list_del(&server->probe_link); + hlist_del_rcu(&server->proc_link); if (!hlist_unhashed(&server->addr4_link)) hlist_del_rcu(&server->addr4_link); if (!hlist_unhashed(&server->addr6_link)) hlist_del_rcu(&server->addr6_link); - write_sequnlock(&net->fs_addr_lock); - afs_destroy_server(net, server); } + write_sequnlock(&net->fs_lock); + + if (active == 0) + afs_destroy_server(net, server); } } @@ -452,15 +546,14 @@ void afs_manage_servers(struct work_struct *work) for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) { struct afs_server *server = rb_entry(cursor, struct afs_server, uuid_rb); - int usage = atomic_read(&server->usage); + int active = atomic_read(&server->active); - _debug("manage %pU %u", &server->uuid, usage); + _debug("manage %pU %u", &server->uuid, active); - ASSERTCMP(usage, >=, 1); - ASSERTIFCMP(purging, usage, ==, 1); + ASSERTIFCMP(purging, active, ==, 0); - if (usage == 1) { - time64_t expire_at = server->put_time; + if (active == 0) { + time64_t expire_at = server->unuse_time; if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) && !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags)) @@ -525,26 +618,27 @@ void afs_purge_servers(struct afs_net *net) /* * Get an update for a server's address list. 
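afs_check_server_record() below, like afs_check_volume_status() later in this patch, lets exactly one caller perform the refresh while everyone else waits for the UPDATING flag to clear. A rough userspace sketch of that single-updater protocol, with C11 atomics and a yield loop standing in for test_and_set_bit_lock()/wait_on_bit(); the flag names are illustrative.

#include <sched.h>
#include <stdatomic.h>

#define FL_NEEDS_UPDATE	0x1u
#define FL_UPDATING	0x2u

static void check_record(atomic_uint *flags, void (*update)(void))
{
	unsigned int old = atomic_load(flags);

	if (!(old & (FL_NEEDS_UPDATE | FL_UPDATING)))
		return;				/* record is current */

	old = atomic_fetch_or(flags, FL_UPDATING);
	if (!(old & FL_UPDATING)) {
		atomic_fetch_and(flags, ~FL_NEEDS_UPDATE);
		update();			/* we won the right to update */
		atomic_fetch_and(flags, ~FL_UPDATING);
		return;
	}

	while (atomic_load(flags) & FL_UPDATING)
		sched_yield();			/* someone else is updating */
}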
*/ -static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server) +static noinline bool afs_update_server_record(struct afs_operation *op, + struct afs_server *server) { struct afs_addr_list *alist, *discard; _enter(""); - trace_afs_server(server, atomic_read(&server->usage), afs_server_trace_update); + trace_afs_server(server, atomic_read(&server->ref), atomic_read(&server->active), + afs_server_trace_update); - alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key, - &server->uuid); + alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid); if (IS_ERR(alist)) { if ((PTR_ERR(alist) == -ERESTARTSYS || PTR_ERR(alist) == -EINTR) && - !(fc->flags & AFS_FS_CURSOR_INTR) && + (op->flags & AFS_OPERATION_UNINTR) && server->addresses) { _leave(" = t [intr]"); return true; } - fc->error = PTR_ERR(alist); - _leave(" = f [%d]", fc->error); + op->error = PTR_ERR(alist); + _leave(" = f [%d]", op->error); return false; } @@ -558,7 +652,6 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a write_unlock(&server->fs_lock); } - server->update_at = ktime_get_real_seconds() + afs_server_update_delay; afs_put_addrlist(discard); _leave(" = t"); return true; @@ -567,10 +660,8 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a /* * See if a server's address list needs updating. */ -bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server) +bool afs_check_server_record(struct afs_operation *op, struct afs_server *server) { - time64_t now = ktime_get_real_seconds(); - long diff; bool success; int ret, retries = 0; @@ -579,25 +670,29 @@ bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server ASSERT(server); retry: - diff = READ_ONCE(server->update_at) - now; - if (diff > 0) { - _leave(" = t [not now %ld]", diff); - return true; - } + if (test_bit(AFS_SERVER_FL_UPDATING, &server->flags)) + goto wait; + if (test_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags)) + goto update; + _leave(" = t [good]"); + return true; +update: if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) { - success = afs_update_server_record(fc, server); + clear_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags); + success = afs_update_server_record(op, server); clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags); wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING); _leave(" = %d", success); return success; } +wait: ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING, - (fc->flags & AFS_FS_CURSOR_INTR) ? - TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + (op->flags & AFS_OPERATION_UNINTR) ? 
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); if (ret == -ERESTARTSYS) { - fc->error = ret; + op->error = ret; _leave(" = f [intr]"); return false; } diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 888d91d195d9..ed9056703505 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -14,11 +14,9 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist) int i; if (slist && refcount_dec_and_test(&slist->usage)) { - for (i = 0; i < slist->nr_servers; i++) { - afs_put_cb_interest(net, slist->servers[i].cb_interest); - afs_put_server(net, slist->servers[i].server, - afs_server_trace_put_slist); - } + for (i = 0; i < slist->nr_servers; i++) + afs_unuse_server(net, slist->servers[i].server, + afs_server_trace_put_slist); kfree(slist); } } @@ -46,12 +44,16 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, refcount_set(&slist->usage, 1); rwlock_init(&slist->lock); + for (i = 0; i < AFS_MAXTYPES; i++) + slist->vids[i] = vldb->vid[i]; + /* Make sure a records exists for each server in the list. */ for (i = 0; i < vldb->nr_servers; i++) { if (!(vldb->fs_mask[i] & type_mask)) continue; - server = afs_lookup_server(cell, key, &vldb->fs_server[i]); + server = afs_lookup_server(cell, key, &vldb->fs_server[i], + vldb->addr_version[i]); if (IS_ERR(server)) { ret = PTR_ERR(server); if (ret == -ENOENT || @@ -123,31 +125,5 @@ changed: } } - /* Keep the old callback interest records where possible so that we - * maintain callback interception. - */ - i = 0; - j = 0; - while (i < old->nr_servers && j < new->nr_servers) { - if (new->servers[j].server == old->servers[i].server) { - struct afs_cb_interest *cbi = old->servers[i].cb_interest; - if (cbi) { - new->servers[j].cb_interest = cbi; - refcount_inc(&cbi->usage); - } - i++; - j++; - continue; - } - - if (new->servers[j].server < old->servers[i].server) { - j++; - continue; - } - - i++; - continue; - } - return true; } diff --git a/fs/afs/super.c b/fs/afs/super.c index dda7a9a66848..b552357b1d13 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -352,7 +352,9 @@ static int afs_validate_fc(struct fs_context *fc) { struct afs_fs_context *ctx = fc->fs_private; struct afs_volume *volume; + struct afs_cell *cell; struct key *key; + int ret; if (!ctx->dyn_root) { if (ctx->no_cell) { @@ -365,6 +367,7 @@ static int afs_validate_fc(struct fs_context *fc) return -EDESTADDRREQ; } + reget_key: /* We try to do the mount securely. 
*/ key = afs_request_key(ctx->cell); if (IS_ERR(key)) @@ -373,10 +376,26 @@ static int afs_validate_fc(struct fs_context *fc) ctx->key = key; if (ctx->volume) { - afs_put_volume(ctx->cell, ctx->volume); + afs_put_volume(ctx->net, ctx->volume, + afs_volume_trace_put_validate_fc); ctx->volume = NULL; } + if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &ctx->cell->flags)) { + ret = afs_cell_detect_alias(ctx->cell, key); + if (ret < 0) + return ret; + if (ret == 1) { + _debug("switch to alias"); + key_put(ctx->key); + ctx->key = NULL; + cell = afs_get_cell(ctx->cell->alias_of); + afs_put_cell(ctx->net, ctx->cell); + ctx->cell = cell; + goto reget_key; + } + } + volume = afs_create_volume(ctx); if (IS_ERR(volume)) return PTR_ERR(volume); @@ -421,7 +440,6 @@ static int afs_set_super(struct super_block *sb, struct fs_context *fc) static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) { struct afs_super_info *as = AFS_FS_S(sb); - struct afs_iget_data iget_data; struct inode *inode = NULL; int ret; @@ -446,13 +464,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) } else { sprintf(sb->s_id, "%llu", as->volume->vid); afs_activate_volume(as->volume); - iget_data.fid.vid = as->volume->vid; - iget_data.fid.vnode = 1; - iget_data.fid.vnode_hi = 0; - iget_data.fid.unique = 1; - iget_data.cb_v_break = as->volume->cb_v_break; - iget_data.cb_s_break = 0; - inode = afs_iget(sb, ctx->key, &iget_data, NULL, NULL, NULL); + inode = afs_root_iget(sb, ctx->key); } if (IS_ERR(inode)) @@ -473,6 +485,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) goto error; } else { sb->s_d_op = &afs_fs_dentry_operations; + rcu_assign_pointer(as->volume->sb, sb); } _leave(" = 0"); @@ -496,7 +509,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc) as->dyn_root = true; } else { as->cell = afs_get_cell(ctx->cell); - as->volume = __afs_get_volume(ctx->volume); + as->volume = afs_get_volume(ctx->volume, + afs_volume_trace_get_alloc_sbi); } } return as; @@ -505,8 +519,9 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc) static void afs_destroy_sbi(struct afs_super_info *as) { if (as) { - afs_put_volume(as->cell, as->volume); - afs_put_cell(afs_net(as->net_ns), as->cell); + struct afs_net *net = afs_net(as->net_ns); + afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi); + afs_put_cell(net, as->cell); put_net(as->net_ns); kfree(as); } @@ -515,7 +530,6 @@ static void afs_destroy_sbi(struct afs_super_info *as) static void afs_kill_super(struct super_block *sb) { struct afs_super_info *as = AFS_FS_S(sb); - struct afs_net *net = afs_net(as->net_ns); if (as->dyn_root) afs_dynroot_depopulate(sb); @@ -524,7 +538,7 @@ static void afs_kill_super(struct super_block *sb) * deactivating the superblock. 
*/ if (as->volume) - afs_clear_callback_interests(net, as->volume->servers); + rcu_assign_pointer(as->volume->sb, NULL); kill_anon_super(sb); if (as->volume) afs_deactivate_volume(as->volume); @@ -592,7 +606,7 @@ static void afs_free_fc(struct fs_context *fc) struct afs_fs_context *ctx = fc->fs_private; afs_destroy_sbi(fc->s_fs_info); - afs_put_volume(ctx->cell, ctx->volume); + afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc); afs_put_cell(ctx->net, ctx->cell); key_put(ctx->key); kfree(ctx); @@ -674,7 +688,6 @@ static struct inode *afs_alloc_inode(struct super_block *sb) vnode->volume = NULL; vnode->lock_key = NULL; vnode->permit_cache = NULL; - RCU_INIT_POINTER(vnode->cb_interest, NULL); #ifdef CONFIG_AFS_FSCACHE vnode->cache = NULL; #endif @@ -704,22 +717,38 @@ static void afs_destroy_inode(struct inode *inode) _debug("DESTROY INODE %p", inode); - ASSERTCMP(rcu_access_pointer(vnode->cb_interest), ==, NULL); - atomic_dec(&afs_count_active_inodes); } +static void afs_get_volume_status_success(struct afs_operation *op) +{ + struct afs_volume_status *vs = &op->volstatus.vs; + struct kstatfs *buf = op->volstatus.buf; + + if (vs->max_quota == 0) + buf->f_blocks = vs->part_max_blocks; + else + buf->f_blocks = vs->max_quota; + + if (buf->f_blocks > vs->blocks_in_use) + buf->f_bavail = buf->f_bfree = + buf->f_blocks - vs->blocks_in_use; +} + +static const struct afs_operation_ops afs_get_volume_status_operation = { + .issue_afs_rpc = afs_fs_get_volume_status, + .issue_yfs_rpc = yfs_fs_get_volume_status, + .success = afs_get_volume_status_success, +}; + /* * return information about an AFS volume */ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct afs_super_info *as = AFS_FS_S(dentry->d_sb); - struct afs_fs_cursor fc; - struct afs_volume_status vs; + struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - struct key *key; - int ret; buf->f_type = dentry->d_sb->s_magic; buf->f_bsize = AFS_BLOCK_SIZE; @@ -732,31 +761,13 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } - key = afs_request_key(vnode->volume->cell); - if (IS_ERR(key)) - return PTR_ERR(key); + op = afs_alloc_operation(NULL, as->volume); + if (IS_ERR(op)) + return PTR_ERR(op); - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - fc.flags |= AFS_FS_CURSOR_NO_VSLEEP; - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_get_volume_status(&fc, &vs); - } - - afs_check_for_remote_deletion(&fc, fc.vnode); - ret = afs_end_vnode_operation(&fc); - } - - key_put(key); - - if (ret == 0) { - if (vs.max_quota == 0) - buf->f_blocks = vs.part_max_blocks; - else - buf->f_blocks = vs.max_quota; - buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use; - } - - return ret; + afs_op_set_vnode(op, 0, vnode); + op->nr_files = 1; + op->volstatus.buf = buf; + op->ops = &afs_get_volume_status_operation; + return afs_do_sync_operation(op); } diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c new file mode 100644 index 000000000000..093895c49c21 --- /dev/null +++ b/fs/afs/vl_alias.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* AFS cell alias detection + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/namei.h> +#include <keys/rxrpc-type.h> +#include "internal.h" + +/* + * Sample a volume. 
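afs_statfs() above is one of the call sites converted from the old cursor loop to an afs_operation carrying an ops table: the generic operation code issues whichever RPC variant (AFS or YFS) the selected server speaks and then applies the success hook to the results. A bare-bones sketch of the shape of that dispatch; the struct and function names are simplified stand-ins, not the kernel's API.

#include <stdio.h>

struct op;

/* Per-operation table: one issue function per protocol dialect plus a
 * hook applied to the results on success. */
struct op_ops {
	void (*issue_afs_rpc)(struct op *op);
	void (*issue_yfs_rpc)(struct op *op);
	void (*success)(struct op *op);
};

struct op {
	const struct op_ops *ops;
	int server_is_yfs;	/* would come from server selection */
	int error;
	long long f_blocks;	/* example result field */
};

static int do_sync_operation(struct op *op)
{
	/* Server rotation and retry elided: issue the right variant once. */
	if (op->server_is_yfs)
		op->ops->issue_yfs_rpc(op);
	else
		op->ops->issue_afs_rpc(op);

	if (op->error == 0 && op->ops->success)
		op->ops->success(op);
	return op->error;
}

static void sample_issue_afs(struct op *op) { op->f_blocks = 1000; }
static void sample_issue_yfs(struct op *op) { op->f_blocks = 2000; }
static void sample_success(struct op *op) { printf("blocks=%lld\n", op->f_blocks); }

static const struct op_ops sample_ops = {
	.issue_afs_rpc	= sample_issue_afs,
	.issue_yfs_rpc	= sample_issue_yfs,
	.success	= sample_success,
};

int main(void)
{
	struct op op = { .ops = &sample_ops };

	return do_sync_operation(&op);
}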
+ */ +static struct afs_volume *afs_sample_volume(struct afs_cell *cell, struct key *key, + const char *name, unsigned int namelen) +{ + struct afs_volume *volume; + struct afs_fs_context fc = { + .type = 0, /* Explicitly leave it to the VLDB */ + .volnamesz = namelen, + .volname = name, + .net = cell->net, + .cell = cell, + .key = key, /* This might need to be something */ + }; + + volume = afs_create_volume(&fc); + _leave(" = %px", volume); + return volume; +} + +/* + * Compare two addresses. + */ +static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a, + const struct sockaddr_rxrpc *srx_b) +{ + short port_a, port_b; + int addr_a, addr_b, diff; + + diff = (short)srx_a->transport_type - (short)srx_b->transport_type; + if (diff) + goto out; + + switch (srx_a->transport_type) { + case AF_INET: { + const struct sockaddr_in *a = &srx_a->transport.sin; + const struct sockaddr_in *b = &srx_b->transport.sin; + addr_a = ntohl(a->sin_addr.s_addr); + addr_b = ntohl(b->sin_addr.s_addr); + diff = addr_a - addr_b; + if (diff == 0) { + port_a = ntohs(a->sin_port); + port_b = ntohs(b->sin_port); + diff = port_a - port_b; + } + break; + } + + case AF_INET6: { + const struct sockaddr_in6 *a = &srx_a->transport.sin6; + const struct sockaddr_in6 *b = &srx_b->transport.sin6; + diff = memcmp(&a->sin6_addr, &b->sin6_addr, 16); + if (diff == 0) { + port_a = ntohs(a->sin6_port); + port_b = ntohs(b->sin6_port); + diff = port_a - port_b; + } + break; + } + + default: + BUG(); + } + +out: + return diff; +} + +/* + * Compare the address lists of a pair of fileservers. + */ +static int afs_compare_fs_alists(const struct afs_server *server_a, + const struct afs_server *server_b) +{ + const struct afs_addr_list *la, *lb; + int a = 0, b = 0, addr_matches = 0; + + la = rcu_dereference(server_a->addresses); + lb = rcu_dereference(server_b->addresses); + + while (a < la->nr_addrs && b < lb->nr_addrs) { + const struct sockaddr_rxrpc *srx_a = &la->addrs[a]; + const struct sockaddr_rxrpc *srx_b = &lb->addrs[b]; + int diff = afs_compare_addrs(srx_a, srx_b); + + if (diff < 0) { + a++; + } else if (diff > 0) { + b++; + } else { + addr_matches++; + a++; + b++; + } + } + + return addr_matches; +} + +/* + * Compare the fileserver lists of two volumes. The server lists are sorted in + * order of ascending UUID. + */ +static int afs_compare_volume_slists(const struct afs_volume *vol_a, + const struct afs_volume *vol_b) +{ + const struct afs_server_list *la, *lb; + int i, a = 0, b = 0, uuid_matches = 0, addr_matches = 0; + + la = rcu_dereference(vol_a->servers); + lb = rcu_dereference(vol_b->servers); + + for (i = 0; i < AFS_MAXTYPES; i++) + if (la->vids[i] != lb->vids[i]) + return 0; + + while (a < la->nr_servers && b < lb->nr_servers) { + const struct afs_server *server_a = la->servers[a].server; + const struct afs_server *server_b = lb->servers[b].server; + int diff = memcmp(&server_a->uuid, &server_b->uuid, sizeof(uuid_t)); + + if (diff < 0) { + a++; + } else if (diff > 0) { + b++; + } else { + uuid_matches++; + addr_matches += afs_compare_fs_alists(server_a, server_b); + a++; + b++; + } + } + + _leave(" = %d [um %d]", addr_matches, uuid_matches); + return addr_matches; +} + +/* + * Compare root.cell volumes. 
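afs_compare_fs_alists() and afs_compare_volume_slists() above both count matches with a single two-pointer merge walk over sequences kept in sorted order. The same idea over plain integer arrays:

#include <stddef.h>

/* Count the values common to two ascending-sorted arrays. */
static int count_matches(const int *a, size_t na, const int *b, size_t nb)
{
	size_t i = 0, j = 0;
	int matches = 0;

	while (i < na && j < nb) {
		if (a[i] < b[j]) {
			i++;
		} else if (a[i] > b[j]) {
			j++;
		} else {
			matches++;
			i++;
			j++;
		}
	}
	return matches;
}

Each element is visited at most once, so comparing the lists costs O(na + nb) rather than the O(na * nb) of a nested scan.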
+ */ +static int afs_compare_cell_roots(struct afs_cell *cell) +{ + struct afs_cell *p; + + _enter(""); + + rcu_read_lock(); + + hlist_for_each_entry_rcu(p, &cell->net->proc_cells, proc_link) { + if (p == cell || p->alias_of) + continue; + if (!p->root_volume) + continue; /* Ignore cells that don't have a root.cell volume. */ + + if (afs_compare_volume_slists(cell->root_volume, p->root_volume) != 0) + goto is_alias; + } + + rcu_read_unlock(); + _leave(" = 0"); + return 0; + +is_alias: + rcu_read_unlock(); + cell->alias_of = afs_get_cell(p); + return 1; +} + +/* + * Query the new cell for a volume from a cell we're already using. + */ +static int afs_query_for_alias_one(struct afs_cell *cell, struct key *key, + struct afs_cell *p) +{ + struct afs_volume *volume, *pvol = NULL; + int ret; + + /* Arbitrarily pick a volume from the list. */ + read_seqlock_excl(&p->volume_lock); + if (!RB_EMPTY_ROOT(&p->volumes)) + pvol = afs_get_volume(rb_entry(p->volumes.rb_node, + struct afs_volume, cell_node), + afs_volume_trace_get_query_alias); + read_sequnlock_excl(&p->volume_lock); + if (!pvol) + return 0; + + _enter("%s:%s", cell->name, pvol->name); + + /* And see if it's in the new cell. */ + volume = afs_sample_volume(cell, key, pvol->name, pvol->name_len); + if (IS_ERR(volume)) { + afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias); + if (PTR_ERR(volume) != -ENOMEDIUM) + return PTR_ERR(volume); + /* That volume is not in the new cell, so not an alias */ + return 0; + } + + /* The new cell has a like-named volume also - compare volume ID, + * server and address lists. + */ + ret = 0; + if (pvol->vid == volume->vid) { + rcu_read_lock(); + if (afs_compare_volume_slists(volume, pvol)) + ret = 1; + rcu_read_unlock(); + } + + afs_put_volume(cell->net, volume, afs_volume_trace_put_query_alias); + afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias); + return ret; +} + +/* + * Query the new cell for volumes we know exist in cells we're already using. + */ +static int afs_query_for_alias(struct afs_cell *cell, struct key *key) +{ + struct afs_cell *p; + + _enter("%s", cell->name); + + if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) + return -ERESTARTSYS; + + hlist_for_each_entry(p, &cell->net->proc_cells, proc_link) { + if (p == cell || p->alias_of) + continue; + if (RB_EMPTY_ROOT(&p->volumes)) + continue; + if (p->root_volume) + continue; /* Ignore cells that have a root.cell volume. */ + afs_get_cell(p); + mutex_unlock(&cell->net->proc_cells_lock); + + if (afs_query_for_alias_one(cell, key, p) != 0) + goto is_alias; + + if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) { + afs_put_cell(cell->net, p); + return -ERESTARTSYS; + } + + afs_put_cell(cell->net, p); + } + + mutex_unlock(&cell->net->proc_cells_lock); + _leave(" = 0"); + return 0; + +is_alias: + cell->alias_of = p; /* Transfer our ref */ + return 1; +} + +/* + * Look up a VLDB record for a volume. 
+ */ +static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key) +{ + struct afs_vl_cursor vc; + char *cell_name = ERR_PTR(-EDESTADDRREQ); + bool skipped = false, not_skipped = false; + int ret; + + if (!afs_begin_vlserver_operation(&vc, cell, key)) + return ERR_PTR(-ERESTARTSYS); + + while (afs_select_vlserver(&vc)) { + if (!test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) { + vc.ac.error = -EOPNOTSUPP; + skipped = true; + continue; + } + not_skipped = true; + cell_name = afs_yfsvl_get_cell_name(&vc); + } + + ret = afs_end_vlserver_operation(&vc); + if (skipped && !not_skipped) + ret = -EOPNOTSUPP; + return ret < 0 ? ERR_PTR(ret) : cell_name; +} + +static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key) +{ + struct afs_cell *master; + char *cell_name; + + cell_name = afs_vl_get_cell_name(cell, key); + if (IS_ERR(cell_name)) + return PTR_ERR(cell_name); + + if (strcmp(cell_name, cell->name) == 0) { + kfree(cell_name); + return 0; + } + + master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name), + NULL, false); + kfree(cell_name); + if (IS_ERR(master)) + return PTR_ERR(master); + + cell->alias_of = master; /* Transfer our ref */ + return 1; +} + +static int afs_do_cell_detect_alias(struct afs_cell *cell, struct key *key) +{ + struct afs_volume *root_volume; + int ret; + + _enter("%s", cell->name); + + ret = yfs_check_canonical_cell_name(cell, key); + if (ret != -EOPNOTSUPP) + return ret; + + /* Try and get the root.cell volume for comparison with other cells */ + root_volume = afs_sample_volume(cell, key, "root.cell", 9); + if (!IS_ERR(root_volume)) { + cell->root_volume = root_volume; + return afs_compare_cell_roots(cell); + } + + if (PTR_ERR(root_volume) != -ENOMEDIUM) + return PTR_ERR(root_volume); + + /* Okay, this cell doesn't have an root.cell volume. We need to + * locate some other random volume and use that to check. + */ + return afs_query_for_alias(cell, key); +} + +/* + * Check to see if a new cell is an alias of a cell we already have. At this + * point we have the cell's volume server list. + * + * Returns 0 if we didn't detect an alias, 1 if we found an alias and an error + * if we had problems gathering the data required. In the case the we did + * detect an alias, cell->alias_of is set to point to the assumed master. + */ +int afs_cell_detect_alias(struct afs_cell *cell, struct key *key) +{ + struct afs_net *net = cell->net; + int ret; + + if (mutex_lock_interruptible(&net->cells_alias_lock) < 0) + return -ERESTARTSYS; + + if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &cell->flags)) { + ret = afs_do_cell_detect_alias(cell, key); + if (ret >= 0) + clear_bit_unlock(AFS_CELL_FL_CHECK_ALIAS, &cell->flags); + } else { + ret = cell->alias_of ? 
1 : 0; + } + + mutex_unlock(&net->cells_alias_lock); + + if (ret == 1) + pr_notice("kAFS: Cell %s is an alias of %s\n", + cell->name, cell->alias_of->name); + return ret; +} diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c index 72eacc14e6e1..f405ca8b240a 100644 --- a/fs/afs/vl_rotate.c +++ b/fs/afs/vl_rotate.c @@ -151,6 +151,10 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc) vc->error = error; vc->flags |= AFS_VL_CURSOR_RETRY; goto next_server; + + case -EOPNOTSUPP: + _debug("notsupp"); + goto next_server; } restart_from_beginning: diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 516e9a3bb5b4..fd82850cd424 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -82,6 +82,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) for (j = 0; j < 6; j++) uuid->node[j] = (u8)ntohl(xdr->node[j]); + entry->addr_version[n] = ntohl(uvldb->serverUnique[i]); entry->nr_servers++; } @@ -447,8 +448,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) call->count2 = ntohl(*bp); /* Type or next count */ if (call->count > YFS_MAXENDPOINTS) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_fsendpt_num); + return afs_protocol_error(call, afs_eproto_yvl_fsendpt_num); alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT); if (!alist) @@ -468,8 +468,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) size = sizeof(__be32) * (1 + 4 + 1); break; default: - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_fsendpt_type); + return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type); } size += sizeof(__be32); @@ -487,21 +486,20 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) switch (call->count2) { case YFS_ENDPOINT_IPV4: if (ntohl(bp[0]) != sizeof(__be32) * 2) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_fsendpt4_len); + return afs_protocol_error( + call, afs_eproto_yvl_fsendpt4_len); afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2])); bp += 3; break; case YFS_ENDPOINT_IPV6: if (ntohl(bp[0]) != sizeof(__be32) * 5) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_fsendpt6_len); + return afs_protocol_error( + call, afs_eproto_yvl_fsendpt6_len); afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5])); bp += 6; break; default: - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_fsendpt_type); + return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type); } /* Got either the type of the next entry or the count of @@ -519,8 +517,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) if (!call->count) goto end; if (call->count > YFS_MAXENDPOINTS) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_vlendpt_type); + return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type); afs_extract_to_buf(call, 1 * sizeof(__be32)); call->unmarshall = 3; @@ -547,8 +544,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) size = sizeof(__be32) * (1 + 4 + 1); break; default: - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_vlendpt_type); + return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type); } if (call->count > 1) @@ -566,19 +562,18 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) switch (call->count2) { case YFS_ENDPOINT_IPV4: if (ntohl(bp[0]) != sizeof(__be32) * 2) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_vlendpt4_len); + return afs_protocol_error( + call, afs_eproto_yvl_vlendpt4_len); bp += 3; break; case YFS_ENDPOINT_IPV6: if (ntohl(bp[0]) != sizeof(__be32) * 5) - return 
afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_vlendpt6_len); + return afs_protocol_error( + call, afs_eproto_yvl_vlendpt6_len); bp += 6; break; default: - return afs_protocol_error(call, -EBADMSG, - afs_eproto_yvl_vlendpt_type); + return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type); } /* Got either the type of the next entry or the count of @@ -650,3 +645,114 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc, afs_make_call(&vc->ac, call, GFP_KERNEL); return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac); } + +/* + * Deliver reply data to a YFSVL.GetCellName operation. + */ +static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) +{ + char *cell_name; + u32 namesz, paddedsz; + int ret; + + _enter("{%u,%zu/%u}", + call->unmarshall, iov_iter_count(call->iter), call->count); + + switch (call->unmarshall) { + case 0: + afs_extract_to_tmp(call); + call->unmarshall++; + + /* Fall through - and extract the cell name length */ + case 1: + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + namesz = ntohl(call->tmp); + if (namesz > AFS_MAXCELLNAME) + return afs_protocol_error(call, afs_eproto_cellname_len); + paddedsz = (namesz + 3) & ~3; + call->count = namesz; + call->count2 = paddedsz - namesz; + + cell_name = kmalloc(namesz + 1, GFP_KERNEL); + if (!cell_name) + return -ENOMEM; + cell_name[namesz] = 0; + call->ret_str = cell_name; + + afs_extract_begin(call, cell_name, namesz); + call->unmarshall++; + + /* Fall through - and extract cell name */ + case 2: + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + afs_extract_discard(call, call->count2); + call->unmarshall++; + + /* Fall through - and extract padding */ + case 3: + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; + + call->unmarshall++; + break; + } + + _leave(" = 0 [done]"); + return 0; +} + +static void afs_destroy_yfsvl_get_cell_name(struct afs_call *call) +{ + kfree(call->ret_str); + afs_flat_call_destructor(call); +} + +/* + * VL.GetCapabilities operation type + */ +static const struct afs_call_type afs_YFSVLGetCellName = { + .name = "YFSVL.GetCellName", + .op = afs_YFSVL_GetCellName, + .deliver = afs_deliver_yfsvl_get_cell_name, + .destructor = afs_destroy_yfsvl_get_cell_name, +}; + +/* + * Probe a volume server for the capabilities that it supports. This can + * return up to 196 words. + * + * We use this to probe for service upgrade to determine what the server at the + * other end supports. + */ +char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc) +{ + struct afs_call *call; + struct afs_net *net = vc->cell->net; + __be32 *bp; + + _enter(""); + + call = afs_alloc_flat_call(net, &afs_YFSVLGetCellName, 1 * 4, 0); + if (!call) + return ERR_PTR(-ENOMEM); + + call->key = vc->key; + call->ret_str = NULL; + call->max_lifespan = AFS_VL_MAX_LIFESPAN; + + /* marshall the parameters */ + bp = call->request; + *bp++ = htonl(YVLGETCELLNAME); + + /* Can't take a ref on server */ + trace_afs_make_vl_call(call); + afs_make_call(&vc->ac, call, GFP_KERNEL); + return (char *)afs_wait_for_call_to_complete(call, &vc->ac); +} diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 4310336b9bb8..9bc0509e3634 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -13,6 +13,56 @@ unsigned __read_mostly afs_volume_gc_delay = 10; unsigned __read_mostly afs_volume_record_life = 60 * 60; /* + * Insert a volume into a cell. If there's an existing volume record, that is + * returned instead with a ref held. 
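The cell name reply above is unmarshalled as a standard XDR string: a 32-bit big-endian length, the bytes themselves, then zero padding out to the next 4-byte boundary (the paddedsz calculation). A tiny sketch of just that framing arithmetic:

#include <assert.h>
#include <stdint.h>

/* XDR string/opaque payloads are padded to a multiple of four bytes. */
static uint32_t xdr_padded_size(uint32_t len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	assert(xdr_padded_size(11) == 12);	/* "example.org": one pad byte */
	assert(xdr_padded_size(8) == 8);	/* already aligned: no padding */
	return 0;
}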
+ */ +static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell, + struct afs_volume *volume) +{ + struct afs_volume *p; + struct rb_node *parent = NULL, **pp; + + write_seqlock(&cell->volume_lock); + + pp = &cell->volumes.rb_node; + while (*pp) { + parent = *pp; + p = rb_entry(parent, struct afs_volume, cell_node); + if (p->vid < volume->vid) { + pp = &(*pp)->rb_left; + } else if (p->vid > volume->vid) { + pp = &(*pp)->rb_right; + } else { + volume = afs_get_volume(p, afs_volume_trace_get_cell_insert); + goto found; + } + } + + rb_link_node_rcu(&volume->cell_node, parent, pp); + rb_insert_color(&volume->cell_node, &cell->volumes); + hlist_add_head_rcu(&volume->proc_link, &cell->proc_volumes); + +found: + write_sequnlock(&cell->volume_lock); + return volume; + +} + +static void afs_remove_volume_from_cell(struct afs_volume *volume) +{ + struct afs_cell *cell = volume->cell; + + if (!hlist_unhashed(&volume->proc_link)) { + trace_afs_volume(volume->vid, atomic_read(&volume->usage), + afs_volume_trace_remove); + write_seqlock(&cell->volume_lock); + hlist_del_rcu(&volume->proc_link); + rb_erase(&volume->cell_node, &cell->volumes); + write_sequnlock(&cell->volume_lock); + } +} + +/* * Allocate a volume record and load it up from a vldb record. */ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, @@ -39,7 +89,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, volume->name_len = vldb->name_len; atomic_set(&volume->usage, 1); - INIT_LIST_HEAD(&volume->proc_link); + INIT_HLIST_NODE(&volume->proc_link); rwlock_init(&volume->servers_lock); rwlock_init(&volume->cb_v_break_lock); memcpy(volume->name, vldb->name, vldb->name_len + 1); @@ -51,7 +101,8 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, } refcount_set(&slist->usage, 1); - volume->servers = slist; + rcu_assign_pointer(volume->servers, slist); + trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc); return volume; error_1: @@ -62,6 +113,25 @@ error_0: } /* + * Look up or allocate a volume record. + */ +static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params, + struct afs_vldb_entry *vldb, + unsigned long type_mask) +{ + struct afs_volume *candidate, *volume; + + candidate = afs_alloc_volume(params, vldb, type_mask); + if (IS_ERR(candidate)) + return candidate; + + volume = afs_insert_volume_into_cell(params->cell, candidate); + if (volume != candidate) + afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup); + return volume; +} + +/* * Look up a VLDB record for a volume. */ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell, @@ -138,7 +208,7 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params) } type_mask = 1UL << params->type; - volume = afs_alloc_volume(params, vldb, type_mask); + volume = afs_lookup_volume(params, vldb, type_mask); error: kfree(vldb); @@ -156,23 +226,42 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume) ASSERTCMP(volume->cache, ==, NULL); #endif - afs_put_serverlist(net, volume->servers); + afs_remove_volume_from_cell(volume); + afs_put_serverlist(net, rcu_access_pointer(volume->servers)); afs_put_cell(net, volume->cell); - kfree(volume); + trace_afs_volume(volume->vid, atomic_read(&volume->usage), + afs_volume_trace_free); + kfree_rcu(volume, rcu); _leave(" [destroyed]"); } /* - * Drop a reference on a volume record. + * Get a reference on a volume record. 
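afs_insert_volume_into_cell() above follows the usual allocate-then-insert-or-reuse pattern: the caller builds a candidate record, the insert routine either links it into the cell's tree or returns the record that was already there with an extra reference, and afs_lookup_volume() then discards the unused candidate. The same pattern over a sorted singly-linked list, with illustrative names:

/* Link 'candidate' into the list sorted by id, or return the existing
 * record for that id with its reference count raised. */
struct rec {
	struct rec *next;
	unsigned long long id;
	int ref;
};

static struct rec *insert_or_get(struct rec **head, struct rec *candidate)
{
	struct rec **pp = head, *p;

	while ((p = *pp) && p->id < candidate->id)
		pp = &p->next;

	if (p && p->id == candidate->id) {
		p->ref++;		/* reuse the record already present */
		return p;
	}

	candidate->next = p;		/* link the new record in order */
	*pp = candidate;
	return candidate;
}

A caller that gets back a pointer other than its candidate drops the candidate, just as afs_lookup_volume() does with afs_put_volume().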
*/ -void afs_put_volume(struct afs_cell *cell, struct afs_volume *volume) +struct afs_volume *afs_get_volume(struct afs_volume *volume, + enum afs_volume_trace reason) { if (volume) { - _enter("%s", volume->name); + int u = atomic_inc_return(&volume->usage); + trace_afs_volume(volume->vid, u, reason); + } + return volume; +} + - if (atomic_dec_and_test(&volume->usage)) - afs_destroy_volume(cell->net, volume); +/* + * Drop a reference on a volume record. + */ +void afs_put_volume(struct afs_net *net, struct afs_volume *volume, + enum afs_volume_trace reason) +{ + if (volume) { + afs_volid_t vid = volume->vid; + int u = atomic_dec_return(&volume->usage); + trace_afs_volume(vid, u, reason); + if (u == 0) + afs_destroy_volume(net, volume); } } @@ -188,10 +277,6 @@ void afs_activate_volume(struct afs_volume *volume) NULL, 0, volume, 0, true); #endif - - write_lock(&volume->cell->proc_lock); - list_add_tail(&volume->proc_link, &volume->cell->proc_volumes); - write_unlock(&volume->cell->proc_lock); } /* @@ -201,10 +286,6 @@ void afs_deactivate_volume(struct afs_volume *volume) { _enter("%s", volume->name); - write_lock(&volume->cell->proc_lock); - list_del_init(&volume->proc_link); - write_unlock(&volume->cell->proc_lock); - #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_cookie(volume->cache, NULL, test_bit(AFS_VOLUME_DELETED, &volume->flags)); @@ -256,17 +337,17 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) write_lock(&volume->servers_lock); discard = new; - old = volume->servers; + old = rcu_dereference_protected(volume->servers, + lockdep_is_held(&volume->servers_lock)); if (afs_annotate_server_list(new, old)) { new->seq = volume->servers_seq + 1; - volume->servers = new; + rcu_assign_pointer(volume->servers, new); smp_wmb(); volume->servers_seq++; discard = old; } volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; - clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); write_unlock(&volume->servers_lock); ret = 0; @@ -281,25 +362,27 @@ error: /* * Make sure the volume record is up to date. */ -int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc) +int afs_check_volume_status(struct afs_volume *volume, struct afs_operation *op) { - time64_t now = ktime_get_real_seconds(); int ret, retries = 0; _enter(""); - if (volume->update_at <= now) - set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); - retry: - if (!test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags) && - !test_bit(AFS_VOLUME_WAIT, &volume->flags)) { - _leave(" = 0"); - return 0; - } - + if (test_bit(AFS_VOLUME_WAIT, &volume->flags)) + goto wait; + if (volume->update_at <= ktime_get_real_seconds() || + test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags)) + goto update; + _leave(" = 0"); + return 0; + +update: if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) { - ret = afs_update_volume_status(volume, fc->key); + clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); + ret = afs_update_volume_status(volume, op->key); + if (ret < 0) + set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags); clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags); wake_up_bit(&volume->flags, AFS_VOLUME_WAIT); @@ -307,14 +390,15 @@ retry: return ret; } +wait: if (!test_bit(AFS_VOLUME_WAIT, &volume->flags)) { _leave(" = 0 [no wait]"); return 0; } ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, - (fc->flags & AFS_FS_CURSOR_INTR) ? - TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + (op->flags & AFS_OPERATION_UNINTR) ? 
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); if (ret == -ERESTARTSYS) { _leave(" = %d", ret); return ret; diff --git a/fs/afs/write.c b/fs/afs/write.c index cb76566763db..97bccde3298b 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -349,82 +349,111 @@ static void afs_pages_written_back(struct afs_vnode *vnode, } /* - * write to a file + * Find a key to use for the writeback. We cached the keys used to author the + * writes on the vnode. *_wbk will contain the last writeback key used or NULL + * and we need to start from there if it's set. */ -static int afs_store_data(struct address_space *mapping, - pgoff_t first, pgoff_t last, - unsigned offset, unsigned to) +static int afs_get_writeback_key(struct afs_vnode *vnode, + struct afs_wb_key **_wbk) { - struct afs_vnode *vnode = AFS_FS_I(mapping->host); - struct afs_fs_cursor fc; - struct afs_status_cb *scb; struct afs_wb_key *wbk = NULL; struct list_head *p; int ret = -ENOKEY, ret2; - _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x", - vnode->volume->name, - vnode->fid.vid, - vnode->fid.vnode, - vnode->fid.unique, - first, last, offset, to); - - scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS); - if (!scb) - return -ENOMEM; - spin_lock(&vnode->wb_lock); - p = vnode->wb_keys.next; + if (*_wbk) + p = (*_wbk)->vnode_link.next; + else + p = vnode->wb_keys.next; - /* Iterate through the list looking for a valid key to use. */ -try_next_key: while (p != &vnode->wb_keys) { wbk = list_entry(p, struct afs_wb_key, vnode_link); _debug("wbk %u", key_serial(wbk->key)); ret2 = key_validate(wbk->key); - if (ret2 == 0) - goto found_key; + if (ret2 == 0) { + refcount_inc(&wbk->usage); + _debug("USE WB KEY %u", key_serial(wbk->key)); + break; + } + + wbk = NULL; if (ret == -ENOKEY) ret = ret2; p = p->next; } spin_unlock(&vnode->wb_lock); - afs_put_wb_key(wbk); - kfree(scb); - _leave(" = %d [no keys]", ret); - return ret; + if (*_wbk) + afs_put_wb_key(*_wbk); + *_wbk = wbk; + return 0; +} -found_key: - refcount_inc(&wbk->usage); - spin_unlock(&vnode->wb_lock); +static void afs_store_data_success(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; - _debug("USE WB KEY %u", key_serial(wbk->key)); + afs_vnode_commit_status(op, &op->file[0]); + if (op->error == 0) { + afs_pages_written_back(vnode, op->store.first, op->store.last); + afs_stat_v(vnode, n_stores); + atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) - + (op->store.first * PAGE_SIZE + op->store.first_offset), + &afs_v2net(vnode)->n_store_bytes); + } +} - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) { - afs_dataversion_t data_version = vnode->status.data_version + 1; +static const struct afs_operation_ops afs_store_data_operation = { + .issue_afs_rpc = afs_fs_store_data, + .issue_yfs_rpc = yfs_fs_store_data, + .success = afs_store_data_success, +}; - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_store_data(&fc, mapping, first, last, offset, to, scb); - } +/* + * write to a file + */ +static int afs_store_data(struct address_space *mapping, + pgoff_t first, pgoff_t last, + unsigned offset, unsigned to) +{ + struct afs_vnode *vnode = AFS_FS_I(mapping->host); + struct afs_operation *op; + struct afs_wb_key *wbk = NULL; + int ret; - afs_check_for_remote_deletion(&fc, vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - if (fc.ac.error == 0) - afs_pages_written_back(vnode, first, last); - ret = afs_end_vnode_operation(&fc); + 
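afs_get_writeback_key() above scans the vnode's cached writeback keys, resuming just after the key it handed out last time, and returns the first one that still validates; the reworked afs_store_data() then swaps in the next key and reissues the store whenever the server rejects the current one with a permission or key error. The rotation itself, reduced to an array and a stand-in for key_validate() (all names here are invented):

#include <stddef.h>

struct wb_key {
	int serial;
	int valid;			/* pretend result of key_validate() */
};

/* Return the index of the next usable key after 'last' (pass -1 to start from
 * the beginning), or -1 once the list is exhausted.
 */
static int next_writeback_key(const struct wb_key *keys, size_t nr, int last)
{
	for (size_t i = (last < 0) ? 0 : (size_t)last + 1; i < nr; i++)
		if (keys[i].valid)
			return (int)i;
	return -1;
}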
_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x", + vnode->volume->name, + vnode->fid.vid, + vnode->fid.vnode, + vnode->fid.unique, + first, last, offset, to); + + ret = afs_get_writeback_key(vnode, &wbk); + if (ret) { + _leave(" = %d [no keys]", ret); + return ret; } - switch (ret) { - case 0: - afs_stat_v(vnode, n_stores); - atomic_long_add((last * PAGE_SIZE + to) - - (first * PAGE_SIZE + offset), - &afs_v2net(vnode)->n_store_bytes); - break; + op = afs_alloc_operation(wbk->key, vnode->volume); + if (IS_ERR(op)) { + afs_put_wb_key(wbk); + return -ENOMEM; + } + + afs_op_set_vnode(op, 0, vnode); + op->file[0].dv_delta = 1; + op->store.mapping = mapping; + op->store.first = first; + op->store.last = last; + op->store.first_offset = offset; + op->store.last_to = to; + op->ops = &afs_store_data_operation; + +try_next_key: + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + + switch (op->error) { case -EACCES: case -EPERM: case -ENOKEY: @@ -432,16 +461,19 @@ found_key: case -EKEYREJECTED: case -EKEYREVOKED: _debug("next"); - spin_lock(&vnode->wb_lock); - p = wbk->vnode_link.next; - afs_put_wb_key(wbk); - goto try_next_key; + + ret = afs_get_writeback_key(vnode, &wbk); + if (ret == 0) { + key_put(op->key); + op->key = key_get(wbk->key); + goto try_next_key; + } + break; } afs_put_wb_key(wbk); - kfree(scb); - _leave(" = %d", ret); - return ret; + _leave(" = %d", op->error); + return afs_put_operation(op); } /* diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c index 7af41fd5f3ee..84f3c4f57531 100644 --- a/fs/afs/xattr.c +++ b/fs/afs/xattr.c @@ -35,6 +35,25 @@ ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size) } /* + * Deal with the result of a successful fetch ACL operation. + */ +static void afs_acl_success(struct afs_operation *op) +{ + afs_vnode_commit_status(op, &op->file[0]); +} + +static void afs_acl_put(struct afs_operation *op) +{ + kfree(op->acl); +} + +static const struct afs_operation_ops afs_fetch_acl_operation = { + .issue_afs_rpc = afs_fs_fetch_acl, + .success = afs_acl_success, + .put = afs_acl_put, +}; + +/* * Get a file's ACL. 
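Worth spelling out the pattern that keeps recurring from here on: instead of the old afs_fs_cursor loop, each caller now fills in an operation and points its ops field at a small table of callbacks (an AFS and a YFS issue hook, a success handler, and a put handler for per-op resources such as a fetched ACL). A stripped-down model of that dispatch, with invented names rather than the kernel's afs_operation API:

#include <errno.h>

struct op;

struct op_ops {
	void (*issue_afs_rpc)(struct op *op);	/* marshal and send the AFS form */
	void (*issue_yfs_rpc)(struct op *op);	/* marshal and send the YFS form */
	void (*success)(struct op *op);		/* commit results on success */
	void (*put)(struct op *op);		/* release per-op resources */
};

struct op {
	const struct op_ops *ops;
	int server_is_yfs;
	int error;
};

static int run_operation(struct op *op)
{
	if (op->server_is_yfs && op->ops->issue_yfs_rpc)
		op->ops->issue_yfs_rpc(op);
	else if (op->ops->issue_afs_rpc)
		op->ops->issue_afs_rpc(op);
	else
		op->error = -EOPNOTSUPP;	/* no suitable protocol variant */

	if (op->error == 0 && op->ops->success)
		op->ops->success(op);
	if (op->ops->put)
		op->ops->put(op);
	return op->error;
}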
*/ static int afs_xattr_get_acl(const struct xattr_handler *handler, @@ -42,37 +61,23 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler, struct inode *inode, const char *name, void *buffer, size_t size) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_acl *acl = NULL; - struct key *key; - int ret = -ENOMEM; + int ret; - scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS); - if (!scb) - goto error; - - key = afs_request_key(vnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; - } + op = afs_alloc_operation(NULL, vnode->volume); + if (IS_ERR(op)) + return -ENOMEM; - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - afs_dataversion_t data_version = vnode->status.data_version; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - acl = afs_fs_fetch_acl(&fc, scb); - } + afs_op_set_vnode(op, 0, vnode); + op->ops = &afs_fetch_acl_operation; - afs_check_for_remote_deletion(&fc, fc.vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - ret = afs_end_vnode_operation(&fc); - } + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + acl = op->acl; + op->acl = NULL; + ret = afs_put_operation(op); if (ret == 0) { ret = acl->size; @@ -80,18 +85,37 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler, if (acl->size <= size) memcpy(buffer, acl->data, acl->size); else - ret = -ERANGE; + op->error = -ERANGE; } - kfree(acl); } - key_put(key); -error_scb: - kfree(scb); -error: + kfree(acl); return ret; } +static bool afs_make_acl(struct afs_operation *op, + const void *buffer, size_t size) +{ + struct afs_acl *acl; + + acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL); + if (!acl) { + afs_op_nomem(op); + return false; + } + + acl->size = size; + memcpy(acl->data, buffer, size); + op->acl = acl; + return true; +} + +static const struct afs_operation_ops afs_store_acl_operation = { + .issue_afs_rpc = afs_fs_store_acl, + .success = afs_acl_success, + .put = afs_acl_put, +}; + /* * Set a file's AFS3 ACL. 
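The buffer handling at the end of afs_xattr_get_acl() above is the usual getxattr contract: a zero-length buffer is a size probe, otherwise the value is copied only if it fits and -ERANGE is returned when it does not. In isolation, with a plain byte buffer standing in for the fetched struct afs_acl:

#include <errno.h>
#include <string.h>

/* Report the value's length, copying it out only when the caller's buffer is
 * large enough; dstlen == 0 means the caller just wants the required size.
 */
static int copy_xattr_value(void *dst, size_t dstlen,
			    const void *value, size_t valuelen)
{
	if (dstlen == 0)
		return (int)valuelen;
	if (valuelen > dstlen)
		return -ERANGE;
	memcpy(dst, value, valuelen);
	return (int)valuelen;
}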
*/ @@ -100,55 +124,22 @@ static int afs_xattr_set_acl(const struct xattr_handler *handler, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); - struct afs_acl *acl = NULL; - struct key *key; - int ret = -ENOMEM; if (flags == XATTR_CREATE) return -EINVAL; - scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS); - if (!scb) - goto error; - - acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL); - if (!acl) - goto error_scb; - - key = afs_request_key(vnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_acl; - } - - acl->size = size; - memcpy(acl->data, buffer, size); - - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - afs_dataversion_t data_version = vnode->status.data_version; + op = afs_alloc_operation(NULL, vnode->volume); + if (IS_ERR(op)) + return -ENOMEM; - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - afs_fs_store_acl(&fc, acl, scb); - } + afs_op_set_vnode(op, 0, vnode); + if (!afs_make_acl(op, buffer, size)) + return afs_put_operation(op); - afs_check_for_remote_deletion(&fc, fc.vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - ret = afs_end_vnode_operation(&fc); - } - - key_put(key); -error_acl: - kfree(acl); -error_scb: - kfree(scb); -error: - return ret; + op->ops = &afs_store_acl_operation; + return afs_do_sync_operation(op); } static const struct xattr_handler afs_xattr_afs_acl_handler = { @@ -157,6 +148,17 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = { .set = afs_xattr_set_acl, }; +static void yfs_acl_put(struct afs_operation *op) +{ + yfs_free_opaque_acl(op->yacl); +} + +static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = { + .issue_yfs_rpc = yfs_fs_fetch_opaque_acl, + .success = afs_acl_success, + /* Don't free op->yacl in .put here */ +}; + /* * Get a file's YFS ACL. 
*/ @@ -165,11 +167,9 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler, struct inode *inode, const char *name, void *buffer, size_t size) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); struct yfs_acl *yacl = NULL; - struct key *key; char buf[16], *data; int which = 0, dsize, ret = -ENOMEM; @@ -193,75 +193,62 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler, else if (which == 3) yacl->flags |= YFS_ACL_WANT_VOL_ACL; - scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS); - if (!scb) + op = afs_alloc_operation(NULL, vnode->volume); + if (IS_ERR(op)) goto error_yacl; - key = afs_request_key(vnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_scb; - } + afs_op_set_vnode(op, 0, vnode); + op->yacl = yacl; + op->ops = &yfs_fetch_opaque_acl_operation; - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - afs_dataversion_t data_version = vnode->status.data_version; + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + ret = afs_put_operation(op); - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - yfs_fs_fetch_opaque_acl(&fc, yacl, scb); + if (ret == 0) { + switch (which) { + case 0: + data = yacl->acl->data; + dsize = yacl->acl->size; + break; + case 1: + data = buf; + dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag); + break; + case 2: + data = buf; + dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned); + break; + case 3: + data = yacl->vol_acl->data; + dsize = yacl->vol_acl->size; + break; + default: + ret = -EOPNOTSUPP; + goto error_yacl; } - afs_check_for_remote_deletion(&fc, fc.vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - ret = afs_end_vnode_operation(&fc); - } - - if (ret < 0) - goto error_key; - - switch (which) { - case 0: - data = yacl->acl->data; - dsize = yacl->acl->size; - break; - case 1: - data = buf; - dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag); - break; - case 2: - data = buf; - dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned); - break; - case 3: - data = yacl->vol_acl->data; - dsize = yacl->vol_acl->size; - break; - default: - ret = -EOPNOTSUPP; - goto error_key; - } - - ret = dsize; - if (size > 0) { - if (dsize > size) { - ret = -ERANGE; - goto error_key; + ret = dsize; + if (size > 0) { + if (dsize <= size) + memcpy(buffer, data, dsize); + else + ret = -ERANGE; } - memcpy(buffer, data, dsize); } -error_key: - key_put(key); -error_scb: - kfree(scb); error_yacl: yfs_free_opaque_acl(yacl); error: return ret; } +static const struct afs_operation_ops yfs_store_opaque_acl2_operation = { + .issue_yfs_rpc = yfs_fs_store_opaque_acl2, + .success = afs_acl_success, + .put = yfs_acl_put, +}; + /* * Set a file's YFS ACL. 
*/ @@ -270,56 +257,23 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { - struct afs_fs_cursor fc; - struct afs_status_cb *scb; + struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); - struct afs_acl *acl = NULL; - struct key *key; - int ret = -ENOMEM; if (flags == XATTR_CREATE || strcmp(name, "acl") != 0) return -EINVAL; - scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS); - if (!scb) - goto error; - - acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL); - if (!acl) - goto error_scb; + op = afs_alloc_operation(NULL, vnode->volume); + if (IS_ERR(op)) + return -ENOMEM; - acl->size = size; - memcpy(acl->data, buffer, size); + afs_op_set_vnode(op, 0, vnode); + if (!afs_make_acl(op, buffer, size)) + return afs_put_operation(op); - key = afs_request_key(vnode->volume->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - goto error_acl; - } - - ret = -ERESTARTSYS; - if (afs_begin_vnode_operation(&fc, vnode, key, true)) { - afs_dataversion_t data_version = vnode->status.data_version; - - while (afs_select_fileserver(&fc)) { - fc.cb_break = afs_calc_vnode_cb_break(vnode); - yfs_fs_store_opaque_acl2(&fc, acl, scb); - } - - afs_check_for_remote_deletion(&fc, fc.vnode); - afs_vnode_commit_status(&fc, vnode, fc.cb_break, - &data_version, scb); - ret = afs_end_vnode_operation(&fc); - } - -error_acl: - kfree(acl); - key_put(key); -error_scb: - kfree(scb); -error: - return ret; + op->ops = &yfs_store_opaque_acl2_operation; + return afs_do_sync_operation(op); } static const struct xattr_handler afs_xattr_yfs_handler = { diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index fe413e7a5cf4..b0a6e40b4da3 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -17,11 +17,6 @@ static const struct afs_fid afs_zero_fid; -static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi) -{ - call->cbi = afs_get_cb_interest(cbi); -} - #define xdr_size(x) (sizeof(*x) / sizeof(__be32)) static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid) @@ -79,6 +74,11 @@ static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len) return bp + len / sizeof(__be32); } +static __be32 *xdr_encode_name(__be32 *bp, const struct qstr *p) +{ + return xdr_encode_string(bp, p->name, p->len); +} + static s64 linux_to_yfs_time(const struct timespec64 *t) { /* Convert to 100ns intervals. 
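The small xdr_encode_name() helper added above just feeds the dentry's qstr (name plus length) to xdr_encode_string(), so names are marshalled by their known length rather than a fresh strlen(). The wire form is the mirror image of the decode sketched earlier: a 32-bit big-endian length, then the bytes zero-padded to a 4-byte boundary. Roughly, assuming a flat word buffer (this is a sketch, not the kernel helper itself):

#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <string.h>

static uint32_t *encode_xdr_string(uint32_t *bp, const char *name, size_t len)
{
	size_t padded = (len + 3) & ~(size_t)3;

	*bp++ = htonl((uint32_t)len);
	memset(bp, 0, padded);		/* zero the pad bytes up front */
	memcpy(bp, name, len);		/* then lay the name on top */
	return bp + padded / sizeof(uint32_t);
}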
*/ @@ -179,21 +179,20 @@ static void xdr_dump_bad(const __be32 *bp) /* * Decode a YFSFetchStatus block */ -static int xdr_decode_YFSFetchStatus(const __be32 **_bp, - struct afs_call *call, - struct afs_status_cb *scb) +static void xdr_decode_YFSFetchStatus(const __be32 **_bp, + struct afs_call *call, + struct afs_status_cb *scb) { const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; u32 type; - int ret; status->abort_code = ntohl(xdr->abort_code); if (status->abort_code != 0) { if (status->abort_code == VNOVNODE) status->nlink = 0; scb->have_error = true; - goto good; + goto advance; } type = ntohl(xdr->type); @@ -221,15 +220,13 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, status->size = xdr_to_u64(xdr->size); status->data_version = xdr_to_u64(xdr->data_version); scb->have_status = true; -good: - ret = 0; advance: *_bp += xdr_size(xdr); - return ret; + return; bad: xdr_dump_bad(*_bp); - ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(call, afs_eproto_bad_status); goto advance; } @@ -339,6 +336,7 @@ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp, */ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) { + struct afs_operation *op = call->op; const __be32 *bp; int ret; @@ -348,11 +346,9 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_YFSCallBack(&bp, call, call->out_scb); - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb); + xdr_decode_YFSCallBack(&bp, call, &op->file[0].scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -364,6 +360,7 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) */ static int yfs_deliver_status_and_volsync(struct afs_call *call) { + struct afs_operation *op = call->op; const __be32 *bp; int ret; @@ -372,10 +369,8 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -394,44 +389,33 @@ static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = { /* * Fetch the status information for a file. 
*/ -int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb, - struct afs_volsync *volsync) +void yfs_fs_fetch_file_status(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus_vnode, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = volsync; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHSTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -439,7 +423,9 @@ int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb */ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) { - struct afs_read *req = call->read_request; + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[0]; + struct afs_read *req = op->fetch.req; const __be32 *bp; unsigned int size; int ret; @@ -534,14 +520,12 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_YFSCallBack(&bp, call, call->out_scb); - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_YFSCallBack(&bp, call, &vp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); - req->data_version = call->out_scb->status.data_version; - req->file_size = call->out_scb->status.size; + req->data_version = vp->scb.status.data_version; + req->file_size = vp->scb.status.size; call->unmarshall++; /* Fall through */ @@ -565,12 +549,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) return 0; } -static void yfs_fetch_data_destructor(struct afs_call *call) -{ - afs_put_read(call->read_request); - afs_flat_call_destructor(call); -} - /* * YFS.FetchData64 operation type */ @@ -578,25 +556,24 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = { .name = "YFS.FetchData64", .op = yfs_FS_FetchData64, .deliver = yfs_deliver_fs_fetch_data64, - .destructor = yfs_fetch_data_destructor, + .destructor = afs_flat_call_destructor, }; /* * Fetch data from a file. 
*/ -int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb, - struct afs_read *req) +void yfs_fs_fetch_data(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; + struct afs_read *req = op->fetch.req; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(",%x,{%llx:%llu},%llx,%llx", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode, + key_serial(op->key), vp->fid.vid, vp->fid.vnode, req->pos, req->len); - call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_u64) * 2, @@ -604,27 +581,19 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb, sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = NULL; - call->read_request = afs_get_read(req); + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_u64(bp, req->pos); bp = xdr_encode_u64(bp, req->len); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -632,6 +601,9 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb, */ static int yfs_deliver_fs_create_vnode(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; @@ -643,15 +615,11 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - xdr_decode_YFSFid(&bp, call->out_fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_YFSCallBack(&bp, call, call->out_scb); - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFid(&bp, &op->file[1].fid); + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_YFSCallBack(&bp, call, &vp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -670,26 +638,20 @@ static const struct afs_call_type afs_RXFSCreateFile = { /* * Create a file. 
*/ -int yfs_fs_create_file(struct afs_fs_cursor *fc, - const char *name, - umode_t mode, - struct afs_status_cb *dvnode_scb, - struct afs_fid *newfid, - struct afs_status_cb *new_scb) +void yfs_fs_create_file(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); - size_t namesz, reqsz, rplsz; + size_t reqsz, rplsz; __be32 *bp; _enter(""); - namesz = strlen(name); reqsz = (sizeof(__be32) + sizeof(__be32) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(namesz) + + xdr_strlen(name->len) + sizeof(struct yfs_xdr_YFSStoreStatus) + sizeof(__be32)); rplsz = (sizeof(struct yfs_xdr_YFSFid) + @@ -698,30 +660,22 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc, sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz); + call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile, reqsz, rplsz); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_fid = newfid; - call->out_scb = new_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSCREATEFILE); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &dvnode->fid); - bp = xdr_encode_string(bp, name, namesz); - bp = xdr_encode_YFSStoreStatus_mode(bp, mode); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); + bp = xdr_encode_YFSStoreStatus_mode(bp, op->create.mode); bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } static const struct afs_call_type yfs_RXFSMakeDir = { @@ -734,26 +688,20 @@ static const struct afs_call_type yfs_RXFSMakeDir = { /* * Make a directory. 
*/ -int yfs_fs_make_dir(struct afs_fs_cursor *fc, - const char *name, - umode_t mode, - struct afs_status_cb *dvnode_scb, - struct afs_fid *newfid, - struct afs_status_cb *new_scb) +void yfs_fs_make_dir(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); - size_t namesz, reqsz, rplsz; + size_t reqsz, rplsz; __be32 *bp; _enter(""); - namesz = strlen(name); reqsz = (sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(namesz) + + xdr_strlen(name->len) + sizeof(struct yfs_xdr_YFSStoreStatus)); rplsz = (sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSFetchStatus) + @@ -761,29 +709,21 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc, sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz); + call = afs_alloc_flat_call(op->net, &yfs_RXFSMakeDir, reqsz, rplsz); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_fid = newfid; - call->out_scb = new_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSMAKEDIR); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &dvnode->fid); - bp = xdr_encode_string(bp, name, namesz); - bp = xdr_encode_YFSStoreStatus_mode(bp, mode); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); + bp = xdr_encode_YFSStoreStatus_mode(bp, op->create.mode); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -791,6 +731,9 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc, */ static int yfs_deliver_fs_remove_file2(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; struct afs_fid fid; const __be32 *bp; int ret; @@ -802,20 +745,24 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - + xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_YFSFid(&bp, &fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); /* Was deleted if vnode->status.abort_code == VNOVNODE. */ - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSVolSync(&bp, &op->volsync); return 0; } +static void yfs_done_fs_remove_file2(struct afs_call *call) +{ + if (call->error == -ECONNABORTED && + call->abort_code == RX_INVALID_OPERATION) { + set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags); + call->op->flags |= AFS_OPERATION_DOWNGRADE; + } +} + /* * YFS.RemoveFile2 operation type. */ @@ -823,55 +770,44 @@ static const struct afs_call_type yfs_RXYFSRemoveFile2 = { .name = "YFS.RemoveFile2", .op = yfs_FS_RemoveFile2, .deliver = yfs_deliver_fs_remove_file2, + .done = yfs_done_fs_remove_file2, .destructor = afs_flat_call_destructor, }; /* * Remove a file and retrieve new file status. 
*/ -int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode, - const char *name, struct afs_status_cb *dvnode_scb, - struct afs_status_cb *vnode_scb) +void yfs_fs_remove_file2(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + struct afs_vnode_param *dvp = &op->file[0]; + const struct qstr *name = &op->dentry->d_name; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); - size_t namesz; __be32 *bp; _enter(""); - namesz = strlen(name); - - call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile2, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(namesz), + xdr_strlen(name->len), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_scb = vnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSREMOVEFILE2); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &dvnode->fid); - bp = xdr_encode_string(bp, name, namesz); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -879,6 +815,8 @@ int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode, */ static int yfs_deliver_fs_remove(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; const __be32 *bp; int ret; @@ -889,11 +827,8 @@ static int yfs_deliver_fs_remove(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); return 0; } @@ -907,6 +842,43 @@ static const struct afs_call_type yfs_RXYFSRemoveFile = { .destructor = afs_flat_call_destructor, }; +/* + * Remove a file. 
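A note on the RemoveFile2 plumbing above: yfs_done_fs_remove_file2() turns an RX_INVALID_OPERATION abort into a persistent hint by setting AFS_SERVER_FL_NO_RM2 on the server and marking the operation for downgrade, and the new yfs_fs_remove_file() that follows checks that bit so later removals go straight to the plain RemoveFile RPC. Stripped of the kernel details it is a cached capability flag plus a retry; every name in this sketch is made up:

#include <errno.h>
#include <stdbool.h>

struct server {
	bool no_rm2;			/* like AFS_SERVER_FL_NO_RM2 */
};

/* Stand-ins for the two RPC issuers; -EOPNOTSUPP plays the part of the
 * "unknown operation" abort.
 */
static int issue_remove_file2(struct server *s) { (void)s; return -EOPNOTSUPP; }
static int issue_remove_file(struct server *s) { (void)s; return 0; }

static int remove_with_fallback(struct server *s)
{
	if (!s->no_rm2) {
		int ret = issue_remove_file2(s);

		if (ret != -EOPNOTSUPP)
			return ret;	/* it worked, or failed for another reason */
		s->no_rm2 = true;	/* remember: this server lacks RemoveFile2 */
	}
	return issue_remove_file(s);
}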
+ */ +void yfs_fs_remove_file(struct afs_operation *op) +{ + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_call *call; + __be32 *bp; + + _enter(""); + + if (!test_bit(AFS_SERVER_FL_NO_RM2, &op->server->flags)) + return yfs_fs_remove_file2(op); + + call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(name->len), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return afs_op_nomem(op); + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSREMOVEFILE); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); + yfs_check_req(call, bp); + + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); +} + static const struct afs_call_type yfs_RXYFSRemoveDir = { .name = "YFS.RemoveDir", .op = yfs_FS_RemoveDir, @@ -915,48 +887,37 @@ static const struct afs_call_type yfs_RXYFSRemoveDir = { }; /* - * remove a file or directory + * Remove a directory. */ -int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode, - const char *name, bool isdir, - struct afs_status_cb *dvnode_scb) +void yfs_fs_remove_dir(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); - size_t namesz; __be32 *bp; _enter(""); - namesz = strlen(name); - call = afs_alloc_flat_call( - net, isdir ? &yfs_RXYFSRemoveDir : &yfs_RXYFSRemoveFile, - sizeof(__be32) + - sizeof(struct yfs_xdr_RPCFlags) + - sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(namesz), - sizeof(struct yfs_xdr_YFSFetchStatus) + - sizeof(struct yfs_xdr_YFSVolSync)); + call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveDir, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(name->len), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; - bp = xdr_encode_u32(bp, isdir ? 
YFSREMOVEDIR : YFSREMOVEFILE); + bp = xdr_encode_u32(bp, YFSREMOVEDIR); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &dvnode->fid); - bp = xdr_encode_string(bp, name, namesz); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -964,6 +925,9 @@ int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode, */ static int yfs_deliver_fs_link(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; @@ -974,13 +938,9 @@ static int yfs_deliver_fs_link(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } @@ -998,50 +958,39 @@ static const struct afs_call_type yfs_RXYFSLink = { /* * Make a hard link. */ -int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode, - const char *name, - struct afs_status_cb *dvnode_scb, - struct afs_status_cb *vnode_scb) +void yfs_fs_link(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); - size_t namesz; __be32 *bp; _enter(""); - namesz = strlen(name); - call = afs_alloc_flat_call(net, &yfs_RXYFSLink, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSLink, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(namesz) + + xdr_strlen(name->len) + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_scb = vnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSLINK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &dvnode->fid); - bp = xdr_encode_string(bp, name, namesz); - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); + bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &vnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &vp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1049,6 +998,9 @@ int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode, */ static int yfs_deliver_fs_symlink(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = 
&op->file[1]; const __be32 *bp; int ret; @@ -1060,14 +1012,10 @@ static int yfs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - xdr_decode_YFSFid(&bp, call->out_fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFid(&bp, &vp->fid); + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; @@ -1086,28 +1034,22 @@ static const struct afs_call_type yfs_RXYFSSymlink = { /* * Create a symbolic link. */ -int yfs_fs_symlink(struct afs_fs_cursor *fc, - const char *name, - const char *contents, - struct afs_status_cb *dvnode_scb, - struct afs_fid *newfid, - struct afs_status_cb *vnode_scb) +void yfs_fs_symlink(struct afs_operation *op) { - struct afs_vnode *dvnode = fc->vnode; + const struct qstr *name = &op->dentry->d_name; + struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(dvnode); - size_t namesz, contents_sz; + size_t contents_sz; __be32 *bp; _enter(""); - namesz = strlen(name); - contents_sz = strlen(contents); - call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink, + contents_sz = strlen(op->create.symlink); + call = afs_alloc_flat_call(op->net, &yfs_RXYFSSymlink, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(namesz) + + xdr_strlen(name->len) + xdr_strlen(contents_sz) + sizeof(struct yfs_xdr_YFSStoreStatus), sizeof(struct yfs_xdr_YFSFid) + @@ -1115,28 +1057,20 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = dvnode_scb; - call->out_fid = newfid; - call->out_scb = vnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSYMLINK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &dvnode->fid); - bp = xdr_encode_string(bp, name, namesz); - bp = xdr_encode_string(bp, contents, contents_sz); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_name(bp, name); + bp = xdr_encode_string(bp, op->create.symlink, contents_sz); bp = xdr_encode_YFSStoreStatus_mode(bp, S_IRWXUGO); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call1(call, &dvnode->fid, name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call1(call, &dvp->fid, name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1144,6 +1078,9 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc, */ static int yfs_deliver_fs_rename(struct afs_call *call) { + struct afs_operation *op = call->op; + struct afs_vnode_param *orig_dvp = &op->file[0]; + struct afs_vnode_param *new_dvp = &op->file[1]; const __be32 *bp; int ret; @@ -1154,14 +1091,12 @@ static int yfs_deliver_fs_rename(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - - xdr_decode_YFSVolSync(&bp, call->out_volsync); + /* If the two dirs are the same, we have two 
copies of the same status + * report, so we just decode it twice. + */ + xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb); + xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } @@ -1179,55 +1114,42 @@ static const struct afs_call_type yfs_RXYFSRename = { /* * Rename a file or directory. */ -int yfs_fs_rename(struct afs_fs_cursor *fc, - const char *orig_name, - struct afs_vnode *new_dvnode, - const char *new_name, - struct afs_status_cb *orig_dvnode_scb, - struct afs_status_cb *new_dvnode_scb) +void yfs_fs_rename(struct afs_operation *op) { - struct afs_vnode *orig_dvnode = fc->vnode; + struct afs_vnode_param *orig_dvp = &op->file[0]; + struct afs_vnode_param *new_dvp = &op->file[1]; + const struct qstr *orig_name = &op->dentry->d_name; + const struct qstr *new_name = &op->dentry_2->d_name; struct afs_call *call; - struct afs_net *net = afs_v2net(orig_dvnode); - size_t o_namesz, n_namesz; __be32 *bp; _enter(""); - o_namesz = strlen(orig_name); - n_namesz = strlen(new_name); - call = afs_alloc_flat_call(net, &yfs_RXYFSRename, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(o_namesz) + + xdr_strlen(orig_name->len) + sizeof(struct yfs_xdr_YFSFid) + - xdr_strlen(n_namesz), + xdr_strlen(new_name->len), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_dir_scb = orig_dvnode_scb; - call->out_scb = new_dvnode_scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSRENAME); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &orig_dvnode->fid); - bp = xdr_encode_string(bp, orig_name, o_namesz); - bp = xdr_encode_YFSFid(bp, &new_dvnode->fid); - bp = xdr_encode_string(bp, new_name, n_namesz); + bp = xdr_encode_YFSFid(bp, &orig_dvp->fid); + bp = xdr_encode_name(bp, orig_name); + bp = xdr_encode_YFSFid(bp, &new_dvp->fid); + bp = xdr_encode_name(bp, new_name); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1243,27 +1165,23 @@ static const struct afs_call_type yfs_RXYFSStoreData64 = { /* * Store a set of pages to a large file. 
*/ -int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, - pgoff_t first, pgoff_t last, - unsigned offset, unsigned to, - struct afs_status_cb *scb) +void yfs_fs_store_data(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); loff_t size, pos, i_size; __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - size = (loff_t)to - (loff_t)offset; - if (first != last) - size += (loff_t)(last - first) << PAGE_SHIFT; - pos = (loff_t)first << PAGE_SHIFT; - pos += offset; + size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset; + if (op->store.first != op->store.last) + size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT; + pos = (loff_t)op->store.first << PAGE_SHIFT; + pos += op->store.first_offset; - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vp->vnode->vfs_inode); if (pos + size > i_size) i_size = size + pos; @@ -1271,7 +1189,7 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, (unsigned long long)size, (unsigned long long)pos, (unsigned long long)i_size); - call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64, sizeof(__be32) + sizeof(__be32) + sizeof(struct yfs_xdr_YFSFid) + @@ -1280,33 +1198,24 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->mapping = mapping; - call->first = first; - call->last = last; - call->first_offset = offset; - call->last_to = to; + return afs_op_nomem(op); + + call->key = op->key; call->send_pages = true; - call->out_scb = scb; /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTOREDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); - bp = xdr_encode_YFSStoreStatus_mtime(bp, &vnode->vfs_inode.i_mtime); + bp = xdr_encode_YFSFid(bp, &vp->fid); + bp = xdr_encode_YFSStoreStatus_mtime(bp, &op->mtime); bp = xdr_encode_u64(bp, pos); bp = xdr_encode_u64(bp, size); bp = xdr_encode_u64(bp, i_size); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1330,18 +1239,17 @@ static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = { * Set the attributes on a file, using YFS.StoreData64 rather than * YFS.StoreStatus so as to alter the file size also. 
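yfs_fs_store_data() above turns the dirty page span plus the byte offsets into the 64-bit position and length that go on the wire: the write starts first_offset bytes into page 'first' and runs through byte last_to of page 'last', and the advertised file size is extended if the write reaches past the current one. A worked example with 4 KiB pages (the concrete numbers are only for illustration):

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assume 4 KiB pages */
	unsigned long first = 2, last = 4;	/* page indices */
	unsigned long first_offset = 100, last_to = 512;
	long long size, pos;

	/* Same arithmetic as yfs_fs_store_data() */
	size = (long long)last_to - (long long)first_offset;
	if (first != last)
		size += (long long)(last - first) << page_shift;
	pos = ((long long)first << page_shift) + first_offset;

	printf("pos=%lld size=%lld end=%lld\n", pos, size, pos + size);
	/* pos=8292 size=8604 end=16896, i.e. up to byte 512 of page 4 */
	return 0;
}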
*/ -static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr, - struct afs_status_cb *scb) +static void yfs_fs_setattr_size(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct iattr *attr = op->setattr.attr; __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64_as_Status, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSStoreStatus) + @@ -1349,72 +1257,59 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTOREDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_YFS_StoreStatus(bp, attr); bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */ bp = xdr_encode_u64(bp, 0); /* size of write */ bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * Set the attributes on a file, using YFS.StoreData64 if there's a change in * file size, and YFS.StoreStatus otherwise. 
*/ -int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr, - struct afs_status_cb *scb) +void yfs_fs_setattr(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct iattr *attr = op->setattr.attr; __be32 *bp; if (attr->ia_valid & ATTR_SIZE) - return yfs_fs_setattr_size(fc, attr, scb); + return yfs_fs_setattr_size(op); _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreStatus, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSStoreStatus), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTORESTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_YFS_StoreStatus(bp, attr); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1422,6 +1317,7 @@ int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr, */ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) { + struct afs_operation *op = call->op; const __be32 *bp; char *p; u32 size; @@ -1443,7 +1339,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) return ret; bp = call->buffer; - xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus); + xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); /* Fall through */ @@ -1457,8 +1353,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) call->count = ntohl(call->tmp); _debug("volname length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_volname_len); + return afs_protocol_error(call, afs_eproto_volname_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; @@ -1487,8 +1382,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) call->count = ntohl(call->tmp); _debug("offline msg length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_offline_msg_len); + return afs_protocol_error(call, afs_eproto_offline_msg_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; @@ -1518,8 +1412,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) call->count = ntohl(call->tmp); _debug("motd length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_motd_len); + return afs_protocol_error(call, afs_eproto_motd_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; @@ -1560,17 +1453,15 @@ static const struct afs_call_type yfs_RXYFSGetVolumeStatus = { /* * fetch the status of a volume */ -int yfs_fs_get_volume_status(struct 
afs_fs_cursor *fc, - struct afs_volume_status *vs) +void yfs_fs_get_volume_status(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(""); - call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSGetVolumeStatus, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_u64), max_t(size_t, @@ -1578,23 +1469,17 @@ int yfs_fs_get_volume_status(struct afs_fs_cursor *fc, sizeof(__be32), AFSOPAQUEMAX + 1)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->out_volstatus = vs; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_u64(bp, vnode->fid.vid); + bp = xdr_encode_u64(bp, vp->fid.vid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1632,118 +1517,93 @@ static const struct afs_call_type yfs_RXYFSReleaseLock = { /* * Set a lock on a file */ -int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type, - struct afs_status_cb *scb) +void yfs_fs_set_lock(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(""); - call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSSetLock, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(__be32), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->lvnode = vnode; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSETLOCK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); - bp = xdr_encode_u32(bp, type); + bp = xdr_encode_YFSFid(bp, &vp->fid); + bp = xdr_encode_u32(bp, op->lock.type); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_calli(call, &vnode->fid, type); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_calli(call, &vp->fid, op->lock.type); + afs_make_op_call(op, call, GFP_NOFS); } /* * extend a lock on a file */ -int yfs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb) +void yfs_fs_extend_lock(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(""); - call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSExtendLock, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->lvnode = vnode; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSEXTENDLOCK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, 
&vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* * release a lock on a file */ -int yfs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb) +void yfs_fs_release_lock(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(""); - call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSReleaseLock, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) - return -ENOMEM; - - call->key = fc->key; - call->lvnode = vnode; - call->out_scb = scb; + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSRELEASELOCK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1759,45 +1619,33 @@ static const struct afs_call_type yfs_RXYFSFetchStatus = { /* * Fetch the status information for a fid without needing a vnode handle. */ -int yfs_fs_fetch_status(struct afs_fs_cursor *fc, - struct afs_net *net, - struct afs_fid *fid, - struct afs_status_cb *scb, - struct afs_volsync *volsync) +void yfs_fs_fetch_status(struct afs_operation *op) { + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), fid->vid, fid->vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = volsync; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHSTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, fid); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1805,6 +1653,7 @@ int yfs_fs_fetch_status(struct afs_fs_cursor *fc, */ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) { + struct afs_operation *op = call->op; struct afs_status_cb *scb; const __be32 *bp; u32 tmp; @@ -1826,10 +1675,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) return ret; tmp = ntohl(call->tmp); - 
_debug("status count: %u/%u", tmp, call->count2); - if (tmp != call->count2) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_ibulkst_count); + _debug("status count: %u/%u", tmp, op->nr_files); + if (tmp != op->nr_files) + return afs_protocol_error(call, afs_eproto_ibulkst_count); call->count = 0; call->unmarshall++; @@ -1843,14 +1691,23 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) if (ret < 0) return ret; + switch (call->count) { + case 0: + scb = &op->file[0].scb; + break; + case 1: + scb = &op->file[1].scb; + break; + default: + scb = &op->more_files[call->count - 2].scb; + break; + } + bp = call->buffer; - scb = &call->out_scb[call->count]; - ret = xdr_decode_YFSFetchStatus(&bp, call, scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, scb); call->count++; - if (call->count < call->count2) + if (call->count < op->nr_files) goto more_counts; call->count = 0; @@ -1867,9 +1724,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); - if (tmp != call->count2) - return afs_protocol_error(call, -EBADMSG, - afs_eproto_ibulkst_cb_count); + if (tmp != op->nr_files) + return afs_protocol_error(call, afs_eproto_ibulkst_cb_count); call->count = 0; call->unmarshall++; more_cbs: @@ -1883,11 +1739,22 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) return ret; _debug("unmarshall CB array"); + switch (call->count) { + case 0: + scb = &op->file[0].scb; + break; + case 1: + scb = &op->file[1].scb; + break; + default: + scb = &op->more_files[call->count - 2].scb; + break; + } + bp = call->buffer; - scb = &call->out_scb[call->count]; xdr_decode_YFSCallBack(&bp, call, scb); call->count++; - if (call->count < call->count2) + if (call->count < op->nr_files) goto more_cbs; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); @@ -1900,7 +1767,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) return ret; bp = call->buffer; - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; /* Fall through */ @@ -1926,50 +1793,39 @@ static const struct afs_call_type yfs_RXYFSInlineBulkStatus = { /* * Fetch the status information for up to 1024 files */ -int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc, - struct afs_net *net, - struct afs_fid *fids, - struct afs_status_cb *statuses, - unsigned int nr_fids, - struct afs_volsync *volsync) +void yfs_fs_inline_bulk_status(struct afs_operation *op) { + struct afs_vnode_param *dvp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; __be32 *bp; int i; _enter(",%x,{%llx:%llu},%u", - key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids); + key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files); - call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSInlineBulkStatus, sizeof(__be32) + sizeof(__be32) + sizeof(__be32) + - sizeof(struct yfs_xdr_YFSFid) * nr_fids, + sizeof(struct yfs_xdr_YFSFid) * op->nr_files, sizeof(struct yfs_xdr_YFSFetchStatus)); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = statuses; - call->out_volsync = volsync; - call->count2 = nr_fids; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS); bp = xdr_encode_u32(bp, 0); /* RPCFlags */ - bp = xdr_encode_u32(bp, nr_fids); - for (i = 0; i < 
nr_fids; i++) - bp = xdr_encode_YFSFid(bp, &fids[i]); + bp = xdr_encode_u32(bp, op->nr_files); + bp = xdr_encode_YFSFid(bp, &dvp->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); + for (i = 0; i < op->nr_files - 2; i++) + bp = xdr_encode_YFSFid(bp, &op->more_files[i].fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &fids[0]); - afs_set_fc_call(call, fc); - afs_make_call(&fc->ac, call, GFP_NOFS); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_NOFS); } /* @@ -1977,7 +1833,9 @@ int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc, */ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) { - struct yfs_acl *yacl = call->out_yacl; + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[0]; + struct yfs_acl *yacl = op->yacl; struct afs_acl *acl; const __be32 *bp; unsigned int size; @@ -2067,10 +1925,8 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) bp = call->buffer; yacl->inherit_flag = ntohl(*bp++); yacl->num_cleaned = ntohl(*bp++); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - xdr_decode_YFSVolSync(&bp, call->out_volsync); + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; /* Fall through */ @@ -2105,45 +1961,33 @@ static const struct afs_call_type yfs_RXYFSFetchOpaqueACL = { /* * Fetch the YFS advanced ACLs for a file. */ -struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *fc, - struct yfs_acl *yacl, - struct afs_status_cb *scb) +void yfs_fs_fetch_opaque_acl(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); - call = afs_alloc_flat_call(net, &yfs_RXYFSFetchOpaqueACL, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchOpaqueACL, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); - if (!call) { - fc->ac.error = -ENOMEM; - return ERR_PTR(-ENOMEM); - } - - call->key = fc->key; - call->out_yacl = yacl; - call->out_scb = scb; - call->out_volsync = NULL; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHOPAQUEACL); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); - afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); - afs_make_call(&fc->ac, call, GFP_KERNEL); - return (struct yfs_acl *)afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_KERNEL); } /* @@ -2159,46 +2003,38 @@ static const struct afs_call_type yfs_RXYFSStoreOpaqueACL2 = { /* * Fetch the YFS ACL for a file. 
*/ -int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl, - struct afs_status_cb *scb) +void yfs_fs_store_opaque_acl2(struct afs_operation *op) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct afs_acl *acl = op->acl; size_t size; __be32 *bp; _enter(",%x,{%llx:%llu},,", - key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + key_serial(op->key), vp->fid.vid, vp->fid.vnode); size = round_up(acl->size, 4); - call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2, + call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreOpaqueACL2, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(__be32) + size, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); - if (!call) { - fc->ac.error = -ENOMEM; - return -ENOMEM; - } - - call->key = fc->key; - call->out_scb = scb; - call->out_volsync = NULL; + if (!call) + return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTOREOPAQUEACL2); bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_u32(bp, acl->size); memcpy(bp, acl->data, acl->size); if (acl->size != size) memset((void *)bp + acl->size, 0, size - acl->size); yfs_check_req(call, bp); - trace_afs_make_fs_call(call, &vnode->fid); - afs_make_call(&fc->ac, call, GFP_KERNEL); - return afs_wait_for_call_to_complete(call, &fc->ac); + trace_afs_make_fs_call(call, &vp->fid); + afs_make_op_call(op, call, GFP_KERNEL); } @@ -176,6 +176,7 @@ struct fsync_iocb { struct file *file; struct work_struct work; bool datasync; + struct cred *creds; }; struct poll_iocb { @@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb, static void aio_fsync_work(struct work_struct *work) { struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); + const struct cred *old_cred = override_creds(iocb->fsync.creds); iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); + revert_creds(old_cred); + put_cred(iocb->fsync.creds); iocb_put(iocb); } @@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, if (unlikely(!req->file->f_op->fsync)) return -EINVAL; + req->creds = prepare_creds(); + if (!req->creds) + return -ENOMEM; + req->datasync = datasync; INIT_WORK(&req->work, aio_fsync_work); schedule_work(&req->work); diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 8035d2a44561..54f0ce444272 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -15,6 +15,7 @@ #include <linux/time.h> #include <linux/namei.h> #include <linux/poll.h> +#include <linux/fiemap.h> static int bad_file_open(struct inode *inode, struct file *filp) { diff --git a/fs/bfs/Kconfig b/fs/bfs/Kconfig index 3e1247f07913..3a757805b585 100644 --- a/fs/bfs/Kconfig +++ b/fs/bfs/Kconfig @@ -11,7 +11,7 @@ config BFS_FS on your /stand slice from within Linux. You then also need to say Y to "UnixWare slices support", below. More information about the BFS file system is contained in the file - <file:Documentation/filesystems/bfs.txt>. + <file:Documentation/filesystems/bfs.rst>. If you don't know what this is about, say N. 
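The fs/aio.c hunk above adds credential capture to the deferred fsync path. Below is a minimal sketch of that pattern, assuming ordinary kernel context: capture the submitter's credentials at submission time, then assume them around the deferred vfs_fsync() so the work item does not run with the kworker's credentials. The names deferred_fsync, deferred_fsync_work and deferred_fsync_submit are illustrative, not the actual aio structures.

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/workqueue.h>

/* Illustrative request; the real code keeps this inside struct aio_kiocb. */
struct deferred_fsync {
	struct work_struct work;
	struct file *file;
	bool datasync;
	struct cred *creds;	/* taken with prepare_creds() at submit time */
};

static void deferred_fsync_work(struct work_struct *work)
{
	struct deferred_fsync *req =
		container_of(work, struct deferred_fsync, work);
	/* Run the flush with the submitter's credentials, not the worker's. */
	const struct cred *old = override_creds(req->creds);

	vfs_fsync(req->file, req->datasync);

	revert_creds(old);
	put_cred(req->creds);
}

static int deferred_fsync_submit(struct deferred_fsync *req)
{
	/* Snapshot the caller's credentials before deferring to a workqueue. */
	req->creds = prepare_creds();
	if (!req->creds)
		return -ENOMEM;

	INIT_WORK(&req->work, deferred_fsync_work);
	schedule_work(&req->work);
	return 0;
}

The design point the hunk illustrates is that schedule_work() severs the link to the submitting task, so anything credential-dependent (LSM checks, quota, etc.) must be captured explicitly before the hand-off and released once the work item completes.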
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 8e8346a81723..3e84e9bb9084 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -151,7 +151,7 @@ static int load_aout_binary(struct linux_binprm * bprm) return -ENOMEM; /* Flush all traces of the currently running executable */ - retval = flush_old_exec(bprm); + retval = begin_new_exec(bprm); if (retval) return retval; @@ -174,7 +174,6 @@ static int load_aout_binary(struct linux_binprm * bprm) if (retval < 0) return retval; - install_exec_creds(bprm); if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 25d489bc9453..e5d50bd880e9 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -40,12 +40,18 @@ #include <linux/sched/coredump.h> #include <linux/sched/task_stack.h> #include <linux/sched/cputime.h> +#include <linux/sizes.h> +#include <linux/types.h> #include <linux/cred.h> #include <linux/dax.h> #include <linux/uaccess.h> #include <asm/param.h> #include <asm/page.h> +#ifndef ELF_COMPAT +#define ELF_COMPAT 0 +#endif + #ifndef user_long_t #define user_long_t long #endif @@ -273,8 +279,8 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, NEW_AUX_ENT(AT_BASE_PLATFORM, (elf_addr_t)(unsigned long)u_base_platform); } - if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) { - NEW_AUX_ENT(AT_EXECFD, bprm->interp_data); + if (bprm->have_execfd) { + NEW_AUX_ENT(AT_EXECFD, bprm->execfd); } #undef NEW_AUX_ENT /* AT_NULL is zero; clear the rest too */ @@ -347,8 +353,6 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, return 0; } -#ifndef elf_map - static unsigned long elf_map(struct file *filep, unsigned long addr, const struct elf_phdr *eppnt, int prot, int type, unsigned long total_size) @@ -388,8 +392,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, return(map_addr); } -#endif /* !elf_map */ - static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr) { int i, first_idx = -1, last_idx = -1; @@ -539,7 +541,8 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp, #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */ -static inline int make_prot(u32 p_flags) +static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state, + bool has_interp, bool is_interp) { int prot = 0; @@ -549,7 +552,8 @@ static inline int make_prot(u32 p_flags) prot |= PROT_WRITE; if (p_flags & PF_X) prot |= PROT_EXEC; - return prot; + + return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp); } /* This is much more generalized than the library routine read function, @@ -559,7 +563,8 @@ static inline int make_prot(u32 p_flags) static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, struct file *interpreter, - unsigned long no_base, struct elf_phdr *interp_elf_phdata) + unsigned long no_base, struct elf_phdr *interp_elf_phdata, + struct arch_elf_state *arch_state) { struct elf_phdr *eppnt; unsigned long load_addr = 0; @@ -591,7 +596,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { if (eppnt->p_type == PT_LOAD) { int elf_type = MAP_PRIVATE | MAP_DENYWRITE; - int elf_prot = make_prot(eppnt->p_flags); + int elf_prot = make_prot(eppnt->p_flags, arch_state, + true, true); unsigned long vaddr = 0; unsigned long k, map_addr; @@ -682,6 +688,111 @@ out: * libraries. There is no binary dependent code anywhere else. 
*/ +static int parse_elf_property(const char *data, size_t *off, size_t datasz, + struct arch_elf_state *arch, + bool have_prev_type, u32 *prev_type) +{ + size_t o, step; + const struct gnu_property *pr; + int ret; + + if (*off == datasz) + return -ENOENT; + + if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN)) + return -EIO; + o = *off; + datasz -= *off; + + if (datasz < sizeof(*pr)) + return -ENOEXEC; + pr = (const struct gnu_property *)(data + o); + o += sizeof(*pr); + datasz -= sizeof(*pr); + + if (pr->pr_datasz > datasz) + return -ENOEXEC; + + WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN); + step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN); + if (step > datasz) + return -ENOEXEC; + + /* Properties are supposed to be unique and sorted on pr_type: */ + if (have_prev_type && pr->pr_type <= *prev_type) + return -ENOEXEC; + *prev_type = pr->pr_type; + + ret = arch_parse_elf_property(pr->pr_type, data + o, + pr->pr_datasz, ELF_COMPAT, arch); + if (ret) + return ret; + + *off = o + step; + return 0; +} + +#define NOTE_DATA_SZ SZ_1K +#define GNU_PROPERTY_TYPE_0_NAME "GNU" +#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME)) + +static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr, + struct arch_elf_state *arch) +{ + union { + struct elf_note nhdr; + char data[NOTE_DATA_SZ]; + } note; + loff_t pos; + ssize_t n; + size_t off, datasz; + int ret; + bool have_prev_type; + u32 prev_type; + + if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr) + return 0; + + /* load_elf_binary() shouldn't call us unless this is true... */ + if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY)) + return -ENOEXEC; + + /* If the properties are crazy large, that's too bad (for now): */ + if (phdr->p_filesz > sizeof(note)) + return -ENOEXEC; + + pos = phdr->p_offset; + n = kernel_read(f, ¬e, phdr->p_filesz, &pos); + + BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ); + if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ) + return -EIO; + + if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || + note.nhdr.n_namesz != NOTE_NAME_SZ || + strncmp(note.data + sizeof(note.nhdr), + GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr))) + return -ENOEXEC; + + off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ, + ELF_GNU_PROPERTY_ALIGN); + if (off > n) + return -ENOEXEC; + + if (note.nhdr.n_descsz > n - off) + return -ENOEXEC; + datasz = off + note.nhdr.n_descsz; + + have_prev_type = false; + do { + ret = parse_elf_property(note.data, &off, datasz, arch, + have_prev_type, &prev_type); + have_prev_type = true; + } while (!ret); + + return ret == -ENOENT ? 
0 : ret; +} + static int load_elf_binary(struct linux_binprm *bprm) { struct file *interpreter = NULL; /* to shut gcc up */ @@ -689,6 +800,7 @@ static int load_elf_binary(struct linux_binprm *bprm) int load_addr_set = 0; unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; + struct elf_phdr *elf_property_phdata = NULL; unsigned long elf_bss, elf_brk; int bss_prot = 0; int retval, i; @@ -726,6 +838,11 @@ static int load_elf_binary(struct linux_binprm *bprm) for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) { char *elf_interpreter; + if (elf_ppnt->p_type == PT_GNU_PROPERTY) { + elf_property_phdata = elf_ppnt; + continue; + } + if (elf_ppnt->p_type != PT_INTERP) continue; @@ -819,9 +936,14 @@ out_free_interp: goto out_free_dentry; /* Pass PT_LOPROC..PT_HIPROC headers to arch code */ + elf_property_phdata = NULL; elf_ppnt = interp_elf_phdata; for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++) switch (elf_ppnt->p_type) { + case PT_GNU_PROPERTY: + elf_property_phdata = elf_ppnt; + break; + case PT_LOPROC ... PT_HIPROC: retval = arch_elf_pt_proc(interp_elf_ex, elf_ppnt, interpreter, @@ -832,6 +954,11 @@ out_free_interp: } } + retval = parse_elf_properties(interpreter ?: bprm->file, + elf_property_phdata, &arch_state); + if (retval) + goto out_free_dentry; + /* * Allow arch code to reject the ELF at this point, whilst it's * still possible to return an error to the code that invoked @@ -844,7 +971,7 @@ out_free_interp: goto out_free_dentry; /* Flush all traces of the currently running executable */ - retval = flush_old_exec(bprm); + retval = begin_new_exec(bprm); if (retval) goto out_free_dentry; @@ -858,7 +985,6 @@ out_free_interp: current->flags |= PF_RANDOMIZE; setup_new_exec(bprm); - install_exec_creds(bprm); /* Do this so that we can load the interpreter, if need be. We will change some of these later */ @@ -913,7 +1039,8 @@ out_free_interp: } } - elf_prot = make_prot(elf_ppnt->p_flags); + elf_prot = make_prot(elf_ppnt->p_flags, &arch_state, + !!interpreter, false); elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; @@ -1056,7 +1183,8 @@ out_free_interp: if (interpreter) { elf_entry = load_elf_interp(interp_elf_ex, interpreter, - load_bias, interp_elf_phdata); + load_bias, interp_elf_phdata, + &arch_state); if (!IS_ERR((void *)elf_entry)) { /* * load_elf_interp() returns relocation @@ -1355,7 +1483,6 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) { u32 __user *header = (u32 __user *) vma->vm_start; u32 word; - mm_segment_t fs = get_fs(); /* * Doing it this way gets the constant folded by GCC. */ @@ -1368,14 +1495,8 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, magic.elfmag[EI_MAG1] = ELFMAG1; magic.elfmag[EI_MAG2] = ELFMAG2; magic.elfmag[EI_MAG3] = ELFMAG3; - /* - * Switch to the user "segment" for get_user(), - * then put back what elf_core_dump() had in place. 
- */ - set_fs(USER_DS); if (unlikely(get_user(word, header))) word = 0; - set_fs(fs); if (word == magic.cmp) return PAGE_SIZE; } @@ -1556,10 +1677,7 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, const kernel_siginfo_t *siginfo) { - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); - copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo); - set_fs(old_fs); + copy_siginfo_to_external(csigdata, siginfo); fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata); } @@ -2186,7 +2304,6 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, static int elf_core_dump(struct coredump_params *cprm) { int has_dumped = 0; - mm_segment_t fs; int segs, i; size_t vma_data_size = 0; struct vm_area_struct *vma, *gate_vma; @@ -2235,13 +2352,10 @@ static int elf_core_dump(struct coredump_params *cprm) * notes. This also sets up the file header. */ if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs)) - goto cleanup; + goto end_coredump; has_dumped = 1; - fs = get_fs(); - set_fs(KERNEL_DS); - offset += sizeof(elf); /* Elf header */ offset += segs * sizeof(struct elf_phdr); /* Program headers */ @@ -2369,9 +2483,6 @@ static int elf_core_dump(struct coredump_params *cprm) } end_coredump: - set_fs(fs); - -cleanup: free_note_info(&info); kfree(shdr4extnum); kvfree(vma_filesz); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 240f66663543..aaf332d32326 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -338,7 +338,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP; /* flush all traces of the currently running executable */ - retval = flush_old_exec(bprm); + retval = begin_new_exec(bprm); if (retval) goto error; @@ -434,7 +434,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) current->mm->start_stack = current->mm->start_brk + stack_size; #endif - install_exec_creds(bprm); if (create_elf_fdpic_tables(bprm, current->mm, &exec_params, &interp_params) < 0) goto error; @@ -589,7 +588,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0) + (k_base_platform ? 
1 : 0) + AT_VECTOR_SIZE_ARCH; - if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) + if (bprm->have_execfd) nitems++; csp = sp; @@ -629,10 +628,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, (elf_addr_t) (unsigned long) u_base_platform); } - if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) { + if (bprm->have_execfd) { nr = 0; csp -= 2 * sizeof(unsigned long); - NEW_AUX_ENT(AT_EXECFD, bprm->interp_data); + NEW_AUX_ENT(AT_EXECFD, bprm->execfd); } nr = 0; @@ -1549,7 +1548,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) { #define NUM_NOTES 6 int has_dumped = 0; - mm_segment_t fs; int segs; int i; struct vm_area_struct *vma; @@ -1589,31 +1587,31 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) /* alloc memory for large data structures: too large to be on stack */ elf = kmalloc(sizeof(*elf), GFP_KERNEL); if (!elf) - goto cleanup; + goto end_coredump; prstatus = kzalloc(sizeof(*prstatus), GFP_KERNEL); if (!prstatus) - goto cleanup; + goto end_coredump; psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); if (!psinfo) - goto cleanup; + goto end_coredump; notes = kmalloc_array(NUM_NOTES, sizeof(struct memelfnote), GFP_KERNEL); if (!notes) - goto cleanup; + goto end_coredump; fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); if (!fpu) - goto cleanup; + goto end_coredump; #ifdef ELF_CORE_COPY_XFPREGS xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); if (!xfpu) - goto cleanup; + goto end_coredump; #endif for (ct = current->mm->core_state->dumper.next; ct; ct = ct->next) { tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) - goto cleanup; + goto end_coredump; tmp->thread = ct->task; list_add(&tmp->list, &thread_list); @@ -1678,9 +1676,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu); #endif - fs = get_fs(); - set_fs(KERNEL_DS); - offset += sizeof(*elf); /* Elf header */ offset += segs * sizeof(struct elf_phdr); /* Program headers */ @@ -1788,9 +1783,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) } end_coredump: - set_fs(fs); - -cleanup: while (!list_empty(&thread_list)) { struct list_head *tmp = thread_list.next; list_del(tmp); diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c index 466497860c62..06b9b9fddf70 100644 --- a/fs/binfmt_em86.c +++ b/fs/binfmt_em86.c @@ -48,10 +48,6 @@ static int load_em86(struct linux_binprm *bprm) if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) return -ENOENT; - allow_write_access(bprm->file); - fput(bprm->file); - bprm->file = NULL; - /* Unlike in the script case, we don't have to do any hairy * parsing to find our interpreter... it's hardcoded! */ @@ -68,15 +64,15 @@ static int load_em86(struct linux_binprm *bprm) * user environment and arguments are stored. 
*/ remove_arg_zero(bprm); - retval = copy_strings_kernel(1, &bprm->filename, bprm); + retval = copy_string_kernel(bprm->filename, bprm); if (retval < 0) return retval; bprm->argc++; if (i_arg) { - retval = copy_strings_kernel(1, &i_arg, bprm); + retval = copy_string_kernel(i_arg, bprm); if (retval < 0) return retval; bprm->argc++; } - retval = copy_strings_kernel(1, &i_name, bprm); + retval = copy_string_kernel(i_name, bprm); if (retval < 0) return retval; bprm->argc++; @@ -89,13 +85,8 @@ static int load_em86(struct linux_binprm *bprm) if (IS_ERR(file)) return PTR_ERR(file); - bprm->file = file; - - retval = prepare_binprm(bprm); - if (retval < 0) - return retval; - - return search_binary_handler(bprm); + bprm->interpreter = file; + return 0; } static struct linux_binfmt em86_format = { diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 831a2b25ba79..9b82bc111d0a 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -534,7 +534,7 @@ static int load_flat_file(struct linux_binprm *bprm, /* Flush all traces of the currently running executable */ if (id == 0) { - ret = flush_old_exec(bprm); + ret = begin_new_exec(bprm); if (ret) goto err; @@ -963,8 +963,6 @@ static int load_flat_binary(struct linux_binprm *bprm) } } - install_exec_creds(bprm); - set_binfmt(&flat_format); #ifdef CONFIG_MMU diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index cdb45829354d..3880a82da1dc 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -134,7 +134,6 @@ static int load_misc_binary(struct linux_binprm *bprm) Node *fmt; struct file *interp_file = NULL; int retval; - int fd_binary = -1; retval = -ENOEXEC; if (!enabled) @@ -160,51 +159,25 @@ static int load_misc_binary(struct linux_binprm *bprm) goto ret; } - if (fmt->flags & MISC_FMT_OPEN_BINARY) { + if (fmt->flags & MISC_FMT_OPEN_BINARY) + bprm->have_execfd = 1; - /* if the binary should be opened on behalf of the - * interpreter than keep it open and assign descriptor - * to it - */ - fd_binary = get_unused_fd_flags(0); - if (fd_binary < 0) { - retval = fd_binary; - goto ret; - } - fd_install(fd_binary, bprm->file); - - /* if the binary is not readable than enforce mm->dumpable=0 - regardless of the interpreter's permissions */ - would_dump(bprm, bprm->file); - - allow_write_access(bprm->file); - bprm->file = NULL; - - /* mark the bprm that fd should be passed to interp */ - bprm->interp_flags |= BINPRM_FLAGS_EXECFD; - bprm->interp_data = fd_binary; - - } else { - allow_write_access(bprm->file); - fput(bprm->file); - bprm->file = NULL; - } /* make argv[1] be the path to the binary */ - retval = copy_strings_kernel(1, &bprm->interp, bprm); + retval = copy_string_kernel(bprm->interp, bprm); if (retval < 0) - goto error; + goto ret; bprm->argc++; /* add the interp as argv[0] */ - retval = copy_strings_kernel(1, &fmt->interpreter, bprm); + retval = copy_string_kernel(fmt->interpreter, bprm); if (retval < 0) - goto error; + goto ret; bprm->argc++; /* Update interp in case binfmt_script needs it. */ retval = bprm_change_interp(fmt->interpreter, bprm); if (retval < 0) - goto error; + goto ret; if (fmt->flags & MISC_FMT_OPEN_FILE) { interp_file = file_clone_open(fmt->interp_file); @@ -215,38 +188,16 @@ static int load_misc_binary(struct linux_binprm *bprm) } retval = PTR_ERR(interp_file); if (IS_ERR(interp_file)) - goto error; - - bprm->file = interp_file; - if (fmt->flags & MISC_FMT_CREDENTIALS) { - loff_t pos = 0; - - /* - * No need to call prepare_binprm(), it's already been - * done. bprm->buf is stale, update from interp_file. 
- */ - memset(bprm->buf, 0, BINPRM_BUF_SIZE); - retval = kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, - &pos); - } else - retval = prepare_binprm(bprm); - - if (retval < 0) - goto error; + goto ret; - retval = search_binary_handler(bprm); - if (retval < 0) - goto error; + bprm->interpreter = interp_file; + if (fmt->flags & MISC_FMT_CREDENTIALS) + bprm->execfd_creds = 1; + retval = 0; ret: dput(fmt->dentry); return retval; -error: - if (fd_binary > 0) - ksys_close(fd_binary); - bprm->interp_flags = 0; - bprm->interp_data = 0; - goto ret; } /* Command parsers */ diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index e9e6a6f4a35f..1b6625e95958 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -16,14 +16,14 @@ #include <linux/fs.h> static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } -static inline char *next_non_spacetab(char *first, const char *last) +static inline const char *next_non_spacetab(const char *first, const char *last) { for (; first <= last; first++) if (!spacetab(*first)) return first; return NULL; } -static inline char *next_terminator(char *first, const char *last) +static inline const char *next_terminator(const char *first, const char *last) { for (; first <= last; first++) if (spacetab(*first) || !*first) @@ -33,8 +33,7 @@ static inline char *next_terminator(char *first, const char *last) static int load_script(struct linux_binprm *bprm) { - const char *i_arg, *i_name; - char *cp, *buf_end; + const char *i_name, *i_sep, *i_arg, *i_end, *buf_end; struct file *file; int retval; @@ -43,20 +42,6 @@ static int load_script(struct linux_binprm *bprm) return -ENOEXEC; /* - * If the script filename will be inaccessible after exec, typically - * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give - * up now (on the assumption that the interpreter will want to load - * this file). - */ - if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) - return -ENOENT; - - /* Release since we are not mapping a binary into memory. */ - allow_write_access(bprm->file); - fput(bprm->file); - bprm->file = NULL; - - /* * This section handles parsing the #! line into separate * interpreter path and argument strings. We must be careful * because bprm->buf is not yet guaranteed to be NUL-terminated @@ -71,39 +56,43 @@ static int load_script(struct linux_binprm *bprm) * parse them on its own. */ buf_end = bprm->buf + sizeof(bprm->buf) - 1; - cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); - if (!cp) { - cp = next_non_spacetab(bprm->buf + 2, buf_end); - if (!cp) + i_end = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); + if (!i_end) { + i_end = next_non_spacetab(bprm->buf + 2, buf_end); + if (!i_end) return -ENOEXEC; /* Entire buf is spaces/tabs */ /* * If there is no later space/tab/NUL we must assume the * interpreter path is truncated. */ - if (!next_terminator(cp, buf_end)) + if (!next_terminator(i_end, buf_end)) return -ENOEXEC; - cp = buf_end; + i_end = buf_end; } - /* NUL-terminate the buffer and any trailing spaces/tabs. */ - *cp = '\0'; - while (cp > bprm->buf) { - cp--; - if ((*cp == ' ') || (*cp == '\t')) - *cp = '\0'; - else - break; - } - for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); - if (*cp == '\0') + /* Trim any trailing spaces/tabs from i_end */ + while (spacetab(i_end[-1])) + i_end--; + + /* Skip over leading spaces/tabs */ + i_name = next_non_spacetab(bprm->buf+2, i_end); + if (!i_name || (i_name == i_end)) return -ENOEXEC; /* No interpreter name found */ - i_name = cp; + + /* Is there an optional argument? 
*/ i_arg = NULL; - for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++) - /* nothing */ ; - while ((*cp == ' ') || (*cp == '\t')) - *cp++ = '\0'; - if (*cp) - i_arg = cp; + i_sep = next_terminator(i_name, i_end); + if (i_sep && (*i_sep != '\0')) + i_arg = next_non_spacetab(i_sep, i_end); + + /* + * If the script filename will be inaccessible after exec, typically + * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give + * up now (on the assumption that the interpreter will want to load + * this file). + */ + if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) + return -ENOENT; + /* * OK, we've parsed out the interpreter name and * (optional) argument. @@ -117,17 +106,19 @@ static int load_script(struct linux_binprm *bprm) retval = remove_arg_zero(bprm); if (retval) return retval; - retval = copy_strings_kernel(1, &bprm->interp, bprm); + retval = copy_string_kernel(bprm->interp, bprm); if (retval < 0) return retval; bprm->argc++; + *((char *)i_end) = '\0'; if (i_arg) { - retval = copy_strings_kernel(1, &i_arg, bprm); + *((char *)i_sep) = '\0'; + retval = copy_string_kernel(i_arg, bprm); if (retval < 0) return retval; bprm->argc++; } - retval = copy_strings_kernel(1, &i_name, bprm); + retval = copy_string_kernel(i_name, bprm); if (retval) return retval; bprm->argc++; @@ -142,11 +133,8 @@ static int load_script(struct linux_binprm *bprm) if (IS_ERR(file)) return PTR_ERR(file); - bprm->file = file; - retval = prepare_binprm(bprm); - if (retval < 0) - return retval; - return search_binary_handler(bprm); + bprm->interpreter = file; + return 0; } static struct linux_binfmt script_format = { diff --git a/fs/block_dev.c b/fs/block_dev.c index 93672c3f1c78..47860e589388 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -255,7 +255,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, break; if (!(iocb->ki_flags & IOCB_HIPRI) || !blk_poll(bdev_get_queue(bdev), qc, true)) - io_schedule(); + blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -449,7 +449,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) if (!(iocb->ki_flags & IOCB_HIPRI) || !blk_poll(bdev_get_queue(bdev), qc, true)) - io_schedule(); + blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -614,10 +614,9 @@ static int blkdev_readpage(struct file * file, struct page * page) return block_read_full_page(page, blkdev_get_block); } -static int blkdev_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void blkdev_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block); + mpage_readahead(rac, blkdev_get_block); } static int blkdev_write_begin(struct file *file, struct address_space *mapping, @@ -672,7 +671,7 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) * i_mutex and doing so causes performance issues with concurrent * O_SYNC writers to a block device. 
*/ - error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL); + error = blkdev_issue_flush(bdev, GFP_KERNEL); if (error == -EOPNOTSUPP) error = 0; @@ -713,7 +712,6 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, blk_queue_exit(bdev->bd_queue); return result; } -EXPORT_SYMBOL_GPL(bdev_read_page); /** * bdev_write_page() - Start writing a page to a block device @@ -758,7 +756,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, blk_queue_exit(bdev->bd_queue); return result; } -EXPORT_SYMBOL_GPL(bdev_write_page); /* * pseudo-fs @@ -882,21 +879,6 @@ static int bdev_set(struct inode *inode, void *data) static LIST_HEAD(all_bdevs); -/* - * If there is a bdev inode for this device, unhash it so that it gets evicted - * as soon as last inode reference is dropped. - */ -void bdev_unhash_inode(dev_t dev) -{ - struct inode *inode; - - inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev); - if (inode) { - remove_inode_hash(inode); - iput(inode); - } -} - struct block_device *bdget(dev_t dev) { struct block_device *bdev; @@ -1516,7 +1498,7 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate) lockdep_assert_held(&bdev->bd_mutex); rescan: - ret = blk_drop_partitions(disk, bdev); + ret = blk_drop_partitions(bdev); if (ret) return ret; @@ -2023,8 +2005,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) if (bdev_read_only(I_BDEV(bd_inode))) return -EPERM; - /* uswsusp needs write permission to the swap */ - if (IS_SWAPFILE(bd_inode) && !hibernation_available()) + if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode)) return -ETXTBSY; if (!iov_iter_count(from)) @@ -2085,7 +2066,7 @@ static int blkdev_writepages(struct address_space *mapping, static const struct address_space_operations def_blk_aops = { .readpage = blkdev_readpage, - .readpages = blkdev_readpages, + .readahead = blkdev_readahead, .writepage = blkdev_writepage, .write_begin = blkdev_write_begin, .write_end = blkdev_write_end, @@ -2183,18 +2164,6 @@ const struct file_operations def_blk_fops = { .fallocate = blkdev_fallocate, }; -int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) -{ - int res; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); - res = blkdev_ioctl(bdev, 0, cmd, arg); - set_fs(old_fs); - return res; -} - -EXPORT_SYMBOL(ioctl_by_bdev); - /** * lookup_bdev - lookup a struct block_device by name * @pathname: special file representing the block device diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 575636f6491e..68b95ad82126 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -14,6 +14,7 @@ config BTRFS_FS select LZO_DECOMPRESS select ZSTD_COMPRESS select ZSTD_DECOMPRESS + select FS_IOMAP select RAID6_PQ select XOR_BLOCKS select SRCU diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 0cc02577577b..d888e71e66b6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -13,6 +13,7 @@ #include "transaction.h" #include "delayed-ref.h" #include "locking.h" +#include "misc.h" /* Just an arbitrary number so we can be sure this happened */ #define BACKREF_FOUND_SHARED 6 @@ -537,18 +538,13 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, const u64 *extent_item_pos, bool ignore_offset) { struct btrfs_root *root; - struct btrfs_key root_key; struct extent_buffer *eb; int ret = 0; int root_level; int level = ref->level; struct btrfs_key search_key = ref->key_for_search; - root_key.objectid = ref->root_id; - root_key.type = BTRFS_ROOT_ITEM_KEY; - root_key.offset = (u64)-1; - - root = 
btrfs_get_fs_root(fs_info, &root_key, false); + root = btrfs_get_fs_root(fs_info, ref->root_id, false); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_free; @@ -2295,3 +2291,832 @@ void free_ipath(struct inode_fs_paths *ipath) kvfree(ipath->fspath); kfree(ipath); } + +struct btrfs_backref_iter *btrfs_backref_iter_alloc( + struct btrfs_fs_info *fs_info, gfp_t gfp_flag) +{ + struct btrfs_backref_iter *ret; + + ret = kzalloc(sizeof(*ret), gfp_flag); + if (!ret) + return NULL; + + ret->path = btrfs_alloc_path(); + if (!ret) { + kfree(ret); + return NULL; + } + + /* Current backref iterator only supports iteration in commit root */ + ret->path->search_commit_root = 1; + ret->path->skip_locking = 1; + ret->fs_info = fs_info; + + return ret; +} + +int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr) +{ + struct btrfs_fs_info *fs_info = iter->fs_info; + struct btrfs_path *path = iter->path; + struct btrfs_extent_item *ei; + struct btrfs_key key; + int ret; + + key.objectid = bytenr; + key.type = BTRFS_METADATA_ITEM_KEY; + key.offset = (u64)-1; + iter->bytenr = bytenr; + + ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); + if (ret < 0) + return ret; + if (ret == 0) { + ret = -EUCLEAN; + goto release; + } + if (path->slots[0] == 0) { + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + ret = -EUCLEAN; + goto release; + } + path->slots[0]--; + + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); + if ((key.type != BTRFS_EXTENT_ITEM_KEY && + key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) { + ret = -ENOENT; + goto release; + } + memcpy(&iter->cur_key, &key, sizeof(key)); + iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], + path->slots[0]); + iter->end_ptr = (u32)(iter->item_ptr + + btrfs_item_size_nr(path->nodes[0], path->slots[0])); + ei = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_extent_item); + + /* + * Only support iteration on tree backref yet. + * + * This is an extra precaution for non skinny-metadata, where + * EXTENT_ITEM is also used for tree blocks, that we can only use + * extent flags to determine if it's a tree block. + */ + if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { + ret = -ENOTSUPP; + goto release; + } + iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); + + /* If there is no inline backref, go search for keyed backref */ + if (iter->cur_ptr >= iter->end_ptr) { + ret = btrfs_next_item(fs_info->extent_root, path); + + /* No inline nor keyed ref */ + if (ret > 0) { + ret = -ENOENT; + goto release; + } + if (ret < 0) + goto release; + + btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, + path->slots[0]); + if (iter->cur_key.objectid != bytenr || + (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && + iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { + ret = -ENOENT; + goto release; + } + iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], + path->slots[0]); + iter->item_ptr = iter->cur_ptr; + iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr( + path->nodes[0], path->slots[0])); + } + + return 0; +release: + btrfs_backref_iter_release(iter); + return ret; +} + +/* + * Go to the next backref item of current bytenr, can be either inlined or + * keyed. + * + * Caller needs to check whether it's inline ref or not by iter->cur_key. + * + * Return 0 if we get next backref without problem. + * Return >0 if there is no extra backref for this bytenr. + * Return <0 if there is something wrong happened. 
+ */ +int btrfs_backref_iter_next(struct btrfs_backref_iter *iter) +{ + struct extent_buffer *eb = btrfs_backref_get_eb(iter); + struct btrfs_path *path = iter->path; + struct btrfs_extent_inline_ref *iref; + int ret; + u32 size; + + if (btrfs_backref_iter_is_inline_ref(iter)) { + /* We're still inside the inline refs */ + ASSERT(iter->cur_ptr < iter->end_ptr); + + if (btrfs_backref_has_tree_block_info(iter)) { + /* First tree block info */ + size = sizeof(struct btrfs_tree_block_info); + } else { + /* Use inline ref type to determine the size */ + int type; + + iref = (struct btrfs_extent_inline_ref *) + ((unsigned long)iter->cur_ptr); + type = btrfs_extent_inline_ref_type(eb, iref); + + size = btrfs_extent_inline_ref_size(type); + } + iter->cur_ptr += size; + if (iter->cur_ptr < iter->end_ptr) + return 0; + + /* All inline items iterated, fall through */ + } + + /* We're at keyed items, there is no inline item, go to the next one */ + ret = btrfs_next_item(iter->fs_info->extent_root, iter->path); + if (ret) + return ret; + + btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); + if (iter->cur_key.objectid != iter->bytenr || + (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && + iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) + return 1; + iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], + path->slots[0]); + iter->cur_ptr = iter->item_ptr; + iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0], + path->slots[0]); + return 0; +} + +void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info, + struct btrfs_backref_cache *cache, int is_reloc) +{ + int i; + + cache->rb_root = RB_ROOT; + for (i = 0; i < BTRFS_MAX_LEVEL; i++) + INIT_LIST_HEAD(&cache->pending[i]); + INIT_LIST_HEAD(&cache->changed); + INIT_LIST_HEAD(&cache->detached); + INIT_LIST_HEAD(&cache->leaves); + INIT_LIST_HEAD(&cache->pending_edge); + INIT_LIST_HEAD(&cache->useless_node); + cache->fs_info = fs_info; + cache->is_reloc = is_reloc; +} + +struct btrfs_backref_node *btrfs_backref_alloc_node( + struct btrfs_backref_cache *cache, u64 bytenr, int level) +{ + struct btrfs_backref_node *node; + + ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL); + node = kzalloc(sizeof(*node), GFP_NOFS); + if (!node) + return node; + + INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->upper); + INIT_LIST_HEAD(&node->lower); + RB_CLEAR_NODE(&node->rb_node); + cache->nr_nodes++; + node->level = level; + node->bytenr = bytenr; + + return node; +} + +struct btrfs_backref_edge *btrfs_backref_alloc_edge( + struct btrfs_backref_cache *cache) +{ + struct btrfs_backref_edge *edge; + + edge = kzalloc(sizeof(*edge), GFP_NOFS); + if (edge) + cache->nr_edges++; + return edge; +} + +/* + * Drop the backref node from cache, also cleaning up all its + * upper edges and any uncached nodes in the path. + * + * This cleanup happens bottom up, thus the node should either + * be the lowest node in the cache or a detached node. 
+ */ +void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *node) +{ + struct btrfs_backref_node *upper; + struct btrfs_backref_edge *edge; + + if (!node) + return; + + BUG_ON(!node->lowest && !node->detached); + while (!list_empty(&node->upper)) { + edge = list_entry(node->upper.next, struct btrfs_backref_edge, + list[LOWER]); + upper = edge->node[UPPER]; + list_del(&edge->list[LOWER]); + list_del(&edge->list[UPPER]); + btrfs_backref_free_edge(cache, edge); + + if (RB_EMPTY_NODE(&upper->rb_node)) { + BUG_ON(!list_empty(&node->upper)); + btrfs_backref_drop_node(cache, node); + node = upper; + node->lowest = 1; + continue; + } + /* + * Add the node to leaf node list if no other child block + * cached. + */ + if (list_empty(&upper->lower)) { + list_add_tail(&upper->lower, &cache->leaves); + upper->lowest = 1; + } + } + + btrfs_backref_drop_node(cache, node); +} + +/* + * Release all nodes/edges from current cache + */ +void btrfs_backref_release_cache(struct btrfs_backref_cache *cache) +{ + struct btrfs_backref_node *node; + int i; + + while (!list_empty(&cache->detached)) { + node = list_entry(cache->detached.next, + struct btrfs_backref_node, list); + btrfs_backref_cleanup_node(cache, node); + } + + while (!list_empty(&cache->leaves)) { + node = list_entry(cache->leaves.next, + struct btrfs_backref_node, lower); + btrfs_backref_cleanup_node(cache, node); + } + + cache->last_trans = 0; + + for (i = 0; i < BTRFS_MAX_LEVEL; i++) + ASSERT(list_empty(&cache->pending[i])); + ASSERT(list_empty(&cache->pending_edge)); + ASSERT(list_empty(&cache->useless_node)); + ASSERT(list_empty(&cache->changed)); + ASSERT(list_empty(&cache->detached)); + ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); + ASSERT(!cache->nr_nodes); + ASSERT(!cache->nr_edges); +} + +/* + * Handle direct tree backref + * + * Direct tree backref means, the backref item shows its parent bytenr + * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined). + * + * @ref_key: The converted backref key. + * For keyed backref, it's the item key. + * For inlined backref, objectid is the bytenr, + * type is btrfs_inline_ref_type, offset is + * btrfs_inline_ref_offset. + */ +static int handle_direct_tree_backref(struct btrfs_backref_cache *cache, + struct btrfs_key *ref_key, + struct btrfs_backref_node *cur) +{ + struct btrfs_backref_edge *edge; + struct btrfs_backref_node *upper; + struct rb_node *rb_node; + + ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); + + /* Only reloc root uses backref pointing to itself */ + if (ref_key->objectid == ref_key->offset) { + struct btrfs_root *root; + + cur->is_reloc_root = 1; + /* Only reloc backref cache cares about a specific root */ + if (cache->is_reloc) { + root = find_reloc_root(cache->fs_info, cur->bytenr); + if (WARN_ON(!root)) + return -ENOENT; + cur->root = root; + } else { + /* + * For generic purpose backref cache, reloc root node + * is useless. 
+ */ + list_add(&cur->list, &cache->useless_node); + } + return 0; + } + + edge = btrfs_backref_alloc_edge(cache); + if (!edge) + return -ENOMEM; + + rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); + if (!rb_node) { + /* Parent node not yet cached */ + upper = btrfs_backref_alloc_node(cache, ref_key->offset, + cur->level + 1); + if (!upper) { + btrfs_backref_free_edge(cache, edge); + return -ENOMEM; + } + + /* + * Backrefs for the upper level block isn't cached, add the + * block to pending list + */ + list_add_tail(&edge->list[UPPER], &cache->pending_edge); + } else { + /* Parent node already cached */ + upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node); + ASSERT(upper->checked); + INIT_LIST_HEAD(&edge->list[UPPER]); + } + btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); + return 0; +} + +/* + * Handle indirect tree backref + * + * Indirect tree backref means, we only know which tree the node belongs to. + * We still need to do a tree search to find out the parents. This is for + * TREE_BLOCK_REF backref (keyed or inlined). + * + * @ref_key: The same as @ref_key in handle_direct_tree_backref() + * @tree_key: The first key of this tree block. + * @path: A clean (released) path, to avoid allocating path everytime + * the function get called. + */ +static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache, + struct btrfs_path *path, + struct btrfs_key *ref_key, + struct btrfs_key *tree_key, + struct btrfs_backref_node *cur) +{ + struct btrfs_fs_info *fs_info = cache->fs_info; + struct btrfs_backref_node *upper; + struct btrfs_backref_node *lower; + struct btrfs_backref_edge *edge; + struct extent_buffer *eb; + struct btrfs_root *root; + struct rb_node *rb_node; + int level; + bool need_check = true; + int ret; + + root = btrfs_get_fs_root(fs_info, ref_key->offset, false); + if (IS_ERR(root)) + return PTR_ERR(root); + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) + cur->cowonly = 1; + + if (btrfs_root_level(&root->root_item) == cur->level) { + /* Tree root */ + ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); + /* + * For reloc backref cache, we may ignore reloc root. But for + * general purpose backref cache, we can't rely on + * btrfs_should_ignore_reloc_root() as it may conflict with + * current running relocation and lead to missing root. + * + * For general purpose backref cache, reloc root detection is + * completely relying on direct backref (key->offset is parent + * bytenr), thus only do such check for reloc cache. 
+ */ + if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { + btrfs_put_root(root); + list_add(&cur->list, &cache->useless_node); + } else { + cur->root = root; + } + return 0; + } + + level = cur->level + 1; + + /* Search the tree to find parent blocks referring to the block */ + path->search_commit_root = 1; + path->skip_locking = 1; + path->lowest_level = level; + ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0); + path->lowest_level = 0; + if (ret < 0) { + btrfs_put_root(root); + return ret; + } + if (ret > 0 && path->slots[level] > 0) + path->slots[level]--; + + eb = path->nodes[level]; + if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { + btrfs_err(fs_info, +"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", + cur->bytenr, level - 1, root->root_key.objectid, + tree_key->objectid, tree_key->type, tree_key->offset); + btrfs_put_root(root); + ret = -ENOENT; + goto out; + } + lower = cur; + + /* Add all nodes and edges in the path */ + for (; level < BTRFS_MAX_LEVEL; level++) { + if (!path->nodes[level]) { + ASSERT(btrfs_root_bytenr(&root->root_item) == + lower->bytenr); + /* Same as previous should_ignore_reloc_root() call */ + if (btrfs_should_ignore_reloc_root(root) && + cache->is_reloc) { + btrfs_put_root(root); + list_add(&lower->list, &cache->useless_node); + } else { + lower->root = root; + } + break; + } + + edge = btrfs_backref_alloc_edge(cache); + if (!edge) { + btrfs_put_root(root); + ret = -ENOMEM; + goto out; + } + + eb = path->nodes[level]; + rb_node = rb_simple_search(&cache->rb_root, eb->start); + if (!rb_node) { + upper = btrfs_backref_alloc_node(cache, eb->start, + lower->level + 1); + if (!upper) { + btrfs_put_root(root); + btrfs_backref_free_edge(cache, edge); + ret = -ENOMEM; + goto out; + } + upper->owner = btrfs_header_owner(eb); + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) + upper->cowonly = 1; + + /* + * If we know the block isn't shared we can avoid + * checking its backrefs. + */ + if (btrfs_block_can_be_shared(root, eb)) + upper->checked = 0; + else + upper->checked = 1; + + /* + * Add the block to pending list if we need to check its + * backrefs, we only do this once while walking up a + * tree as we will catch anything else later on. + */ + if (!upper->checked && need_check) { + need_check = false; + list_add_tail(&edge->list[UPPER], + &cache->pending_edge); + } else { + if (upper->checked) + need_check = true; + INIT_LIST_HEAD(&edge->list[UPPER]); + } + } else { + upper = rb_entry(rb_node, struct btrfs_backref_node, + rb_node); + ASSERT(upper->checked); + INIT_LIST_HEAD(&edge->list[UPPER]); + if (!upper->owner) + upper->owner = btrfs_header_owner(eb); + } + btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); + + if (rb_node) { + btrfs_put_root(root); + break; + } + lower = upper; + upper = NULL; + } +out: + btrfs_release_path(path); + return ret; +} + +/* + * Add backref node @cur into @cache. + * + * NOTE: Even if the function returned 0, @cur is not yet cached as its upper + * links aren't yet bi-directional. Needs to finish such links. + * Use btrfs_backref_finish_upper_links() to finish such linkage. 
+ * + * @path: Released path for indirect tree backref lookup + * @iter: Released backref iter for extent tree search + * @node_key: The first key of the tree block + */ +int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache, + struct btrfs_path *path, + struct btrfs_backref_iter *iter, + struct btrfs_key *node_key, + struct btrfs_backref_node *cur) +{ + struct btrfs_fs_info *fs_info = cache->fs_info; + struct btrfs_backref_edge *edge; + struct btrfs_backref_node *exist; + int ret; + + ret = btrfs_backref_iter_start(iter, cur->bytenr); + if (ret < 0) + return ret; + /* + * We skip the first btrfs_tree_block_info, as we don't use the key + * stored in it, but fetch it from the tree block + */ + if (btrfs_backref_has_tree_block_info(iter)) { + ret = btrfs_backref_iter_next(iter); + if (ret < 0) + goto out; + /* No extra backref? This means the tree block is corrupted */ + if (ret > 0) { + ret = -EUCLEAN; + goto out; + } + } + WARN_ON(cur->checked); + if (!list_empty(&cur->upper)) { + /* + * The backref was added previously when processing backref of + * type BTRFS_TREE_BLOCK_REF_KEY + */ + ASSERT(list_is_singular(&cur->upper)); + edge = list_entry(cur->upper.next, struct btrfs_backref_edge, + list[LOWER]); + ASSERT(list_empty(&edge->list[UPPER])); + exist = edge->node[UPPER]; + /* + * Add the upper level block to pending list if we need check + * its backrefs + */ + if (!exist->checked) + list_add_tail(&edge->list[UPPER], &cache->pending_edge); + } else { + exist = NULL; + } + + for (; ret == 0; ret = btrfs_backref_iter_next(iter)) { + struct extent_buffer *eb; + struct btrfs_key key; + int type; + + cond_resched(); + eb = btrfs_backref_get_eb(iter); + + key.objectid = iter->bytenr; + if (btrfs_backref_iter_is_inline_ref(iter)) { + struct btrfs_extent_inline_ref *iref; + + /* Update key for inline backref */ + iref = (struct btrfs_extent_inline_ref *) + ((unsigned long)iter->cur_ptr); + type = btrfs_get_extent_inline_ref_type(eb, iref, + BTRFS_REF_TYPE_BLOCK); + if (type == BTRFS_REF_TYPE_INVALID) { + ret = -EUCLEAN; + goto out; + } + key.type = type; + key.offset = btrfs_extent_inline_ref_offset(eb, iref); + } else { + key.type = iter->cur_key.type; + key.offset = iter->cur_key.offset; + } + + /* + * Parent node found and matches current inline ref, no need to + * rebuild this node for this inline ref + */ + if (exist && + ((key.type == BTRFS_TREE_BLOCK_REF_KEY && + exist->owner == key.offset) || + (key.type == BTRFS_SHARED_BLOCK_REF_KEY && + exist->bytenr == key.offset))) { + exist = NULL; + continue; + } + + /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ + if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { + ret = handle_direct_tree_backref(cache, &key, cur); + if (ret < 0) + goto out; + continue; + } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { + ret = -EINVAL; + btrfs_print_v0_err(fs_info); + btrfs_handle_fs_error(fs_info, ret, NULL); + goto out; + } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { + continue; + } + + /* + * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset + * means the root objectid. We need to search the tree to get + * its parent bytenr. 
+ */ + ret = handle_indirect_tree_backref(cache, path, &key, node_key, + cur); + if (ret < 0) + goto out; + } + ret = 0; + cur->checked = 1; + WARN_ON(exist); +out: + btrfs_backref_iter_release(iter); + return ret; +} + +/* + * Finish the upwards linkage created by btrfs_backref_add_tree_node() + */ +int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *start) +{ + struct list_head *useless_node = &cache->useless_node; + struct btrfs_backref_edge *edge; + struct rb_node *rb_node; + LIST_HEAD(pending_edge); + + ASSERT(start->checked); + + /* Insert this node to cache if it's not COW-only */ + if (!start->cowonly) { + rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, + &start->rb_node); + if (rb_node) + btrfs_backref_panic(cache->fs_info, start->bytenr, + -EEXIST); + list_add_tail(&start->lower, &cache->leaves); + } + + /* + * Use breadth first search to iterate all related edges. + * + * The starting points are all the edges of this node + */ + list_for_each_entry(edge, &start->upper, list[LOWER]) + list_add_tail(&edge->list[UPPER], &pending_edge); + + while (!list_empty(&pending_edge)) { + struct btrfs_backref_node *upper; + struct btrfs_backref_node *lower; + struct rb_node *rb_node; + + edge = list_first_entry(&pending_edge, + struct btrfs_backref_edge, list[UPPER]); + list_del_init(&edge->list[UPPER]); + upper = edge->node[UPPER]; + lower = edge->node[LOWER]; + + /* Parent is detached, no need to keep any edges */ + if (upper->detached) { + list_del(&edge->list[LOWER]); + btrfs_backref_free_edge(cache, edge); + + /* Lower node is orphan, queue for cleanup */ + if (list_empty(&lower->upper)) + list_add(&lower->list, useless_node); + continue; + } + + /* + * All new nodes added in current build_backref_tree() haven't + * been linked to the cache rb tree. + * So if we have upper->rb_node populated, this means a cache + * hit. We only need to link the edge, as @upper and all its + * parents have already been linked. 
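Whether @upper came from the cache (the hit case described just above) or was freshly allocated, "linking the edge" at this point only means completing the second half of the bidirectional link; the first half was already created with LINK_LOWER inside btrfs_backref_add_tree_node(). Schematically (a hedged restatement, not extra patch code):

	/* LOWER half, done while the tree node was added: */
	list_add_tail(&edge->list[LOWER], &lower->upper);
	/* UPPER half, done here by btrfs_backref_finish_upper_links(): */
	list_add_tail(&edge->list[UPPER], &upper->lower);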
+ */ + if (!RB_EMPTY_NODE(&upper->rb_node)) { + if (upper->lowest) { + list_del_init(&upper->lower); + upper->lowest = 0; + } + + list_add_tail(&edge->list[UPPER], &upper->lower); + continue; + } + + /* Sanity check, we shouldn't have any unchecked nodes */ + if (!upper->checked) { + ASSERT(0); + return -EUCLEAN; + } + + /* Sanity check, COW-only node has non-COW-only parent */ + if (start->cowonly != upper->cowonly) { + ASSERT(0); + return -EUCLEAN; + } + + /* Only cache non-COW-only (subvolume trees) tree blocks */ + if (!upper->cowonly) { + rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, + &upper->rb_node); + if (rb_node) { + btrfs_backref_panic(cache->fs_info, + upper->bytenr, -EEXIST); + return -EUCLEAN; + } + } + + list_add_tail(&edge->list[UPPER], &upper->lower); + + /* + * Also queue all the parent edges of this uncached node + * to finish the upper linkage + */ + list_for_each_entry(edge, &upper->upper, list[LOWER]) + list_add_tail(&edge->list[UPPER], &pending_edge); + } + return 0; +} + +void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *node) +{ + struct btrfs_backref_node *lower; + struct btrfs_backref_node *upper; + struct btrfs_backref_edge *edge; + + while (!list_empty(&cache->useless_node)) { + lower = list_first_entry(&cache->useless_node, + struct btrfs_backref_node, list); + list_del_init(&lower->list); + } + while (!list_empty(&cache->pending_edge)) { + edge = list_first_entry(&cache->pending_edge, + struct btrfs_backref_edge, list[UPPER]); + list_del(&edge->list[UPPER]); + list_del(&edge->list[LOWER]); + lower = edge->node[LOWER]; + upper = edge->node[UPPER]; + btrfs_backref_free_edge(cache, edge); + + /* + * Lower is no longer linked to any upper backref nodes and + * isn't in the cache, we can free it ourselves. + */ + if (list_empty(&lower->upper) && + RB_EMPTY_NODE(&lower->rb_node)) + list_add(&lower->list, &cache->useless_node); + + if (!RB_EMPTY_NODE(&upper->rb_node)) + continue; + + /* Add this guy's upper edges to the list to process */ + list_for_each_entry(edge, &upper->upper, list[LOWER]) + list_add_tail(&edge->list[UPPER], + &cache->pending_edge); + if (list_empty(&upper->upper)) + list_add(&upper->list, &cache->useless_node); + } + + while (!list_empty(&cache->useless_node)) { + lower = list_first_entry(&cache->useless_node, + struct btrfs_backref_node, list); + list_del_init(&lower->list); + if (lower == node) + node = NULL; + btrfs_backref_free_node(cache, lower); + } + + btrfs_backref_cleanup_node(cache, node); + ASSERT(list_empty(&cache->useless_node) && + list_empty(&cache->pending_edge)); +} diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 723d6da99114..ff705cc564a9 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -8,6 +8,7 @@ #include <linux/btrfs.h> #include "ulist.h" +#include "disk-io.h" #include "extent_io.h" struct inode_fs_paths { @@ -78,4 +79,300 @@ struct prelim_ref { u64 wanted_disk_byte; }; +/* + * Iterate backrefs of one extent. + * + * Now it only supports iteration of tree block in commit root. 
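Before the header moves on to the iterator declarations, here is a hedged sketch of how the three helpers just added are meant to be chained by a caller such as relocation's backref-tree builder. The function name and the simplified error handling are illustrative assumptions, and a full builder also drains cache->pending_edge between the two steps:

static int build_tree_sketch(struct btrfs_backref_cache *cache,
			     struct btrfs_path *path,
			     struct btrfs_backref_iter *iter,
			     struct btrfs_key *node_key,
			     struct btrfs_backref_node *node)
{
	int ret;

	/* Walk @node's backrefs; unchecked parents end up on pending_edge. */
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
	if (ret < 0)
		goto error;

	/*
	 * (A real builder repeats the add step for every node queued on
	 * cache->pending_edge before finishing the links.)
	 */

	/* Make each LOWER->UPPER link bidirectional and insert into the cache. */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;
	return 0;

error:
	/* Drop the partially built nodes and edges left on the cache lists. */
	btrfs_backref_error_cleanup(cache, node);
	return ret;
}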
+ */ +struct btrfs_backref_iter { + u64 bytenr; + struct btrfs_path *path; + struct btrfs_fs_info *fs_info; + struct btrfs_key cur_key; + u32 item_ptr; + u32 cur_ptr; + u32 end_ptr; +}; + +struct btrfs_backref_iter *btrfs_backref_iter_alloc( + struct btrfs_fs_info *fs_info, gfp_t gfp_flag); + +static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter) +{ + if (!iter) + return; + btrfs_free_path(iter->path); + kfree(iter); +} + +static inline struct extent_buffer *btrfs_backref_get_eb( + struct btrfs_backref_iter *iter) +{ + if (!iter) + return NULL; + return iter->path->nodes[0]; +} + +/* + * For metadata with EXTENT_ITEM key (non-skinny) case, the first inline data + * is btrfs_tree_block_info, without a btrfs_extent_inline_ref header. + * + * This helper determines if that's the case. + */ +static inline bool btrfs_backref_has_tree_block_info( + struct btrfs_backref_iter *iter) +{ + if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY && + iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item)) + return true; + return false; +} + +int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr); + +int btrfs_backref_iter_next(struct btrfs_backref_iter *iter); + +static inline bool btrfs_backref_iter_is_inline_ref( + struct btrfs_backref_iter *iter) +{ + if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY || + iter->cur_key.type == BTRFS_METADATA_ITEM_KEY) + return true; + return false; +} + +static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter) +{ + iter->bytenr = 0; + iter->item_ptr = 0; + iter->cur_ptr = 0; + iter->end_ptr = 0; + btrfs_release_path(iter->path); + memset(&iter->cur_key, 0, sizeof(iter->cur_key)); +} + +/* + * Backref cache related structures + * + * The whole objective of backref_cache is to build a bi-directional map + * of tree blocks (represented by backref_node) and all their parents. + */ + +/* + * Represent a tree block in the backref cache + */ +struct btrfs_backref_node { + struct { + struct rb_node rb_node; + u64 bytenr; + }; /* Use rb_simple_node for search/insert */ + + u64 new_bytenr; + /* Objectid of tree block owner, can be not uptodate */ + u64 owner; + /* Link to pending, changed or detached list */ + struct list_head list; + + /* List of upper level edges, which link this node to its parents */ + struct list_head upper; + /* List of lower level edges, which link this node to its children */ + struct list_head lower; + + /* NULL if this node is not tree root */ + struct btrfs_root *root; + /* Extent buffer got by COWing the block */ + struct extent_buffer *eb; + /* Level of the tree block */ + unsigned int level:8; + /* Is the block in a non-shareable tree */ + unsigned int cowonly:1; + /* 1 if no child node is in the cache */ + unsigned int lowest:1; + /* Is the extent buffer locked */ + unsigned int locked:1; + /* Has the block been processed */ + unsigned int processed:1; + /* Have backrefs of this block been checked */ + unsigned int checked:1; + /* + * 1 if corresponding block has been COWed but some upper level block + * pointers may not point to the new location + */ + unsigned int pending:1; + /* 1 if the backref node isn't connected to any other backref node */ + unsigned int detached:1; + + /* + * For generic purpose backref cache, where we only care if it's a reloc + * root, doesn't care the source subvolid. + */ + unsigned int is_reloc_root:1; +}; + +#define LOWER 0 +#define UPPER 1 + +/* + * Represent an edge connecting upper and lower backref nodes. 
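The iterator declared above follows the usual 0 / >0 / <0 return convention (positioned on a backref / ran out of backrefs / error), the same way btrfs_backref_add_tree_node() drives it. A minimal, hedged usage sketch with a hypothetical caller name:

static int walk_backrefs_sketch(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	while (ret == 0) {
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			/*
			 * Inline backref: lives in the extent tree leaf
			 * returned by btrfs_backref_get_eb(iter), located
			 * by iter->cur_ptr.
			 */
		} else {
			/* Keyed backref item, described by iter->cur_key. */
		}
		ret = btrfs_backref_iter_next(iter);
	}

	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret < 0 ? ret : 0;
}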
+ */ +struct btrfs_backref_edge { + /* + * list[LOWER] is linked to btrfs_backref_node::upper of lower level + * node, and list[UPPER] is linked to btrfs_backref_node::lower of + * upper level node. + * + * Also, build_backref_tree() uses list[UPPER] for pending edges, before + * linking list[UPPER] to its upper level nodes. + */ + struct list_head list[2]; + + /* Two related nodes */ + struct btrfs_backref_node *node[2]; +}; + +struct btrfs_backref_cache { + /* Red black tree of all backref nodes in the cache */ + struct rb_root rb_root; + /* For passing backref nodes to btrfs_reloc_cow_block */ + struct btrfs_backref_node *path[BTRFS_MAX_LEVEL]; + /* + * List of blocks that have been COWed but some block pointers in upper + * level blocks may not reflect the new location + */ + struct list_head pending[BTRFS_MAX_LEVEL]; + /* List of backref nodes with no child node */ + struct list_head leaves; + /* List of blocks that have been COWed in current transaction */ + struct list_head changed; + /* List of detached backref node. */ + struct list_head detached; + + u64 last_trans; + + int nr_nodes; + int nr_edges; + + /* List of unchecked backref edges during backref cache build */ + struct list_head pending_edge; + + /* List of useless backref nodes during backref cache build */ + struct list_head useless_node; + + struct btrfs_fs_info *fs_info; + + /* + * Whether this cache is for relocation + * + * Reloction backref cache require more info for reloc root compared + * to generic backref cache. + */ + unsigned int is_reloc; +}; + +void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info, + struct btrfs_backref_cache *cache, int is_reloc); +struct btrfs_backref_node *btrfs_backref_alloc_node( + struct btrfs_backref_cache *cache, u64 bytenr, int level); +struct btrfs_backref_edge *btrfs_backref_alloc_edge( + struct btrfs_backref_cache *cache); + +#define LINK_LOWER (1 << 0) +#define LINK_UPPER (1 << 1) +static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, + struct btrfs_backref_node *lower, + struct btrfs_backref_node *upper, + int link_which) +{ + ASSERT(upper && lower && upper->level == lower->level + 1); + edge->node[LOWER] = lower; + edge->node[UPPER] = upper; + if (link_which & LINK_LOWER) + list_add_tail(&edge->list[LOWER], &lower->upper); + if (link_which & LINK_UPPER) + list_add_tail(&edge->list[UPPER], &upper->lower); +} + +static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *node) +{ + if (node) { + cache->nr_nodes--; + btrfs_put_root(node->root); + kfree(node); + } +} + +static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache, + struct btrfs_backref_edge *edge) +{ + if (edge) { + cache->nr_edges--; + kfree(edge); + } +} + +static inline void btrfs_backref_unlock_node_buffer( + struct btrfs_backref_node *node) +{ + if (node->locked) { + btrfs_tree_unlock(node->eb); + node->locked = 0; + } +} + +static inline void btrfs_backref_drop_node_buffer( + struct btrfs_backref_node *node) +{ + if (node->eb) { + btrfs_backref_unlock_node_buffer(node); + free_extent_buffer(node->eb); + node->eb = NULL; + } +} + +/* + * Drop the backref node from cache without cleaning up its children + * edges. + * + * This can only be called on node without parent edges. + * The children edges are still kept as is. 
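The allocators and btrfs_backref_link_edge() above are normally used together; a minimal, hedged sketch of wiring an existing child ("lower") node to a newly discovered parent ("upper") block, with a hypothetical helper name:

static int link_parent_sketch(struct btrfs_backref_cache *cache,
			      struct btrfs_backref_node *lower,
			      u64 parent_bytenr)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	upper = btrfs_backref_alloc_node(cache, parent_bytenr, lower->level + 1);
	if (!upper) {
		btrfs_backref_free_edge(cache, edge);
		return -ENOMEM;
	}

	/*
	 * Link both directions at once.  btrfs_backref_add_tree_node()
	 * instead passes only LINK_LOWER and leaves the UPPER half to
	 * btrfs_backref_finish_upper_links().
	 */
	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER | LINK_UPPER);
	return 0;
}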
+ */ +static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree, + struct btrfs_backref_node *node) +{ + BUG_ON(!list_empty(&node->upper)); + + btrfs_backref_drop_node_buffer(node); + list_del(&node->list); + list_del(&node->lower); + if (!RB_EMPTY_NODE(&node->rb_node)) + rb_erase(&node->rb_node, &tree->rb_root); + btrfs_backref_free_node(tree, node); +} + +void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *node); + +void btrfs_backref_release_cache(struct btrfs_backref_cache *cache); + +static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info, + u64 bytenr, int errno) +{ + btrfs_panic(fs_info, errno, + "Inconsistency in backref cache found at offset %llu", + bytenr); +} + +int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache, + struct btrfs_path *path, + struct btrfs_backref_iter *iter, + struct btrfs_key *node_key, + struct btrfs_backref_node *cur); + +int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *start); + +void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *node); + #endif diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 696f47103cfc..176e8a292fd1 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -7,7 +7,6 @@ #include "disk-io.h" #include "free-space-cache.h" #include "free-space-tree.h" -#include "disk-io.h" #include "volumes.h" #include "transaction.h" #include "ref-verify.h" @@ -161,6 +160,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, struct rb_node *parent = NULL; struct btrfs_block_group *cache; + ASSERT(block_group->length != 0); + spin_lock(&info->block_group_cache_lock); p = &info->block_group_cache_tree.rb_node; @@ -863,11 +864,34 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) } } +static int remove_block_group_item(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_block_group *block_group) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *root; + struct btrfs_key key; + int ret; + + root = fs_info->extent_root; + key.objectid = block_group->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = block_group->length; + + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0) + ret = -ENOENT; + if (ret < 0) + return ret; + + ret = btrfs_del_item(trans, root, path); + return ret; +} + int btrfs_remove_block_group(struct btrfs_trans_handle *trans, u64 group_start, struct extent_map *em) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *root = fs_info->extent_root; struct btrfs_path *path; struct btrfs_block_group *block_group; struct btrfs_free_cluster *cluster; @@ -1065,26 +1089,25 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, spin_unlock(&block_group->space_info->lock); - key.objectid = block_group->start; - key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; - key.offset = block_group->length; - mutex_lock(&fs_info->chunk_mutex); spin_lock(&block_group->lock); block_group->removed = 1; /* - * At this point trimming can't start on this block group, because we - * removed the block group from the tree fs_info->block_group_cache_tree - * so no one can't find it anymore and even if someone already got this - * block group before we removed it from the rbtree, they have already - * incremented block_group->trimming - if they didn't, they won't find - * any free space entries because we already removed 
them all when we - * called btrfs_remove_free_space_cache(). + * At this point trimming or scrub can't start on this block group, + * because we removed the block group from the rbtree + * fs_info->block_group_cache_tree so no one can't find it anymore and + * even if someone already got this block group before we removed it + * from the rbtree, they have already incremented block_group->frozen - + * if they didn't, for the trimming case they won't find any free space + * entries because we already removed them all when we called + * btrfs_remove_free_space_cache(). * * And we must not remove the extent map from the fs_info->mapping_tree * to prevent the same logical address range and physical device space - * ranges from being reused for a new block group. This is because our - * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is + * ranges from being reused for a new block group. This is needed to + * avoid races with trimming and scrub. + * + * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is * completely transactionless, so while it is trimming a range the * currently running transaction might finish and a new one start, * allowing for new block groups to be created that can reuse the same @@ -1095,7 +1118,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, * in place until the extents have been discarded completely when * the transaction commit has completed. */ - remove_em = (atomic_read(&block_group->trimming) == 0); + remove_em = (atomic_read(&block_group->frozen) == 0); spin_unlock(&block_group->lock); mutex_unlock(&fs_info->chunk_mutex); @@ -1107,16 +1130,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, /* Once for the block groups rbtree */ btrfs_put_block_group(block_group); - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret > 0) - ret = -EIO; + ret = remove_block_group_item(trans, path, block_group); if (ret < 0) goto out; - ret = btrfs_del_item(trans, root, path); - if (ret) - goto out; - if (remove_em) { struct extent_map_tree *em_tree; @@ -1175,7 +1192,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( free_extent_map(em); return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, - num_items, 1); + num_items); } /* @@ -1284,25 +1301,17 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans, ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, EXTENT_DIRTY); if (ret) - goto err; + goto out; } ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, EXTENT_DIRTY); - if (ret) - goto err; +out: mutex_unlock(&fs_info->unused_bg_unpin_mutex); if (prev_trans) btrfs_put_transaction(prev_trans); - return true; - -err: - mutex_unlock(&fs_info->unused_bg_unpin_mutex); - if (prev_trans) - btrfs_put_transaction(prev_trans); - btrfs_dec_block_group_ro(bg); - return false; + return ret == 0; } /* @@ -1400,8 +1409,10 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * We could have pending pinned extents for this block group, * just delete them, we don't care about them anymore. */ - if (!clean_pinned_extents(trans, block_group)) + if (!clean_pinned_extents(trans, block_group)) { + btrfs_dec_block_group_ro(block_group); goto end_trans; + } /* * At this point, the block_group is read only and should fail @@ -1450,7 +1461,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) /* Implicit trim during transaction commit. 
*/ if (trimming) - btrfs_get_block_group_trimming(block_group); + btrfs_freeze_block_group(block_group); /* * Btrfs_remove_chunk will abort the transaction if things go @@ -1460,7 +1471,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) if (ret) { if (trimming) - btrfs_put_block_group_trimming(block_group); + btrfs_unfreeze_block_group(block_group); goto end_trans; } @@ -1774,7 +1785,7 @@ static void link_block_group(struct btrfs_block_group *cache) } static struct btrfs_block_group *btrfs_create_block_group_cache( - struct btrfs_fs_info *fs_info, u64 start, u64 size) + struct btrfs_fs_info *fs_info, u64 start) { struct btrfs_block_group *cache; @@ -1790,7 +1801,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache( } cache->start = start; - cache->length = size; cache->fs_info = fs_info; cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); @@ -1809,7 +1819,7 @@ static struct btrfs_block_group *btrfs_create_block_group_cache( INIT_LIST_HEAD(&cache->dirty_list); INIT_LIST_HEAD(&cache->io_list); btrfs_init_free_space_ctl(cache); - atomic_set(&cache->trimming, 0); + atomic_set(&cache->frozen, 0); mutex_init(&cache->free_space_lock); btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); @@ -1870,25 +1880,44 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) return ret; } +static int read_block_group_item(struct btrfs_block_group *cache, + struct btrfs_path *path, + const struct btrfs_key *key) +{ + struct extent_buffer *leaf = path->nodes[0]; + struct btrfs_block_group_item bgi; + int slot = path->slots[0]; + + cache->length = key->offset; + + read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), + sizeof(bgi)); + cache->used = btrfs_stack_block_group_used(&bgi); + cache->flags = btrfs_stack_block_group_flags(&bgi); + + return 0; +} + static int read_one_block_group(struct btrfs_fs_info *info, struct btrfs_path *path, const struct btrfs_key *key, int need_clear) { - struct extent_buffer *leaf = path->nodes[0]; struct btrfs_block_group *cache; struct btrfs_space_info *space_info; - struct btrfs_block_group_item bgi; const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); - int slot = path->slots[0]; int ret; ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); - cache = btrfs_create_block_group_cache(info, key->objectid, key->offset); + cache = btrfs_create_block_group_cache(info, key->objectid); if (!cache) return -ENOMEM; + ret = read_block_group_item(cache, path, key); + if (ret < 0) + goto error; + if (need_clear) { /* * When we mount with old space cache, we need to @@ -1903,10 +1932,6 @@ static int read_one_block_group(struct btrfs_fs_info *info, if (btrfs_test_opt(info, SPACE_CACHE)) cache->disk_cache_state = BTRFS_DC_CLEAR; } - read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), - sizeof(bgi)); - cache->used = btrfs_stack_block_group_used(&bgi); - cache->flags = btrfs_stack_block_group_flags(&bgi); if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { btrfs_err(info, @@ -1934,15 +1959,15 @@ static int read_one_block_group(struct btrfs_fs_info *info, * are empty, and we can just add all the space in and be done with it. * This saves us _a_lot_ of time, particularly in the full case. 
*/ - if (key->offset == cache->used) { + if (cache->length == cache->used) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; btrfs_free_excluded_extents(cache); } else if (cache->used == 0) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; - add_new_free_space(cache, key->objectid, - key->objectid + key->offset); + add_new_free_space(cache, cache->start, + cache->start + cache->length); btrfs_free_excluded_extents(cache); } @@ -1952,7 +1977,7 @@ static int read_one_block_group(struct btrfs_fs_info *info, goto error; } trace_btrfs_add_block_group(info, cache, 0); - btrfs_update_space_info(info, cache->flags, key->offset, + btrfs_update_space_info(info, cache->flags, cache->length, cache->used, cache->bytes_super, &space_info); cache->space_info = space_info; @@ -1991,7 +2016,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) path = btrfs_alloc_path(); if (!path) return -ENOMEM; - path->reada = READA_FORWARD; cache_gen = btrfs_super_cache_generation(info->super_copy); if (btrfs_test_opt(info, SPACE_CACHE) && @@ -2046,13 +2070,32 @@ error: return ret; } +static int insert_block_group_item(struct btrfs_trans_handle *trans, + struct btrfs_block_group *block_group) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_item bgi; + struct btrfs_root *root; + struct btrfs_key key; + + spin_lock(&block_group->lock); + btrfs_set_stack_block_group_used(&bgi, block_group->used); + btrfs_set_stack_block_group_chunk_objectid(&bgi, + BTRFS_FIRST_CHUNK_TREE_OBJECTID); + btrfs_set_stack_block_group_flags(&bgi, block_group->flags); + key.objectid = block_group->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = block_group->length; + spin_unlock(&block_group->lock); + + root = fs_info->extent_root; + return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); +} + void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *block_group; - struct btrfs_root *extent_root = fs_info->extent_root; - struct btrfs_block_group_item item; - struct btrfs_key key; int ret = 0; if (!trans->can_flush_pending_bgs) @@ -2065,21 +2108,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) if (ret) goto next; - spin_lock(&block_group->lock); - btrfs_set_stack_block_group_used(&item, block_group->used); - btrfs_set_stack_block_group_chunk_objectid(&item, - BTRFS_FIRST_CHUNK_TREE_OBJECTID); - btrfs_set_stack_block_group_flags(&item, block_group->flags); - key.objectid = block_group->start; - key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; - key.offset = block_group->length; - spin_unlock(&block_group->lock); - - ret = btrfs_insert_item(trans, extent_root, &key, &item, - sizeof(item)); + ret = insert_block_group_item(trans, block_group); if (ret) btrfs_abort_transaction(trans, ret); - ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset); + ret = btrfs_finish_chunk_alloc(trans, block_group->start, + block_group->length); if (ret) btrfs_abort_transaction(trans, ret); add_block_group_free_space(trans, block_group); @@ -2100,10 +2133,11 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, btrfs_set_log_full_commit(trans); - cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size); + cache = btrfs_create_block_group_cache(fs_info, chunk_offset); if (!cache) return -ENOMEM; + cache->length = size; cache->used = bytes_used; cache->flags = type; cache->last_byte_to_unpin = (u64)-1; 
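insert_block_group_item(), update_block_group_item() and remove_block_group_item() all address the on-disk item through the same key layout; a purely illustrative helper (not part of the patch) makes the convention explicit:

static void block_group_item_key(const struct btrfs_block_group *bg,
				 struct btrfs_key *key)
{
	key->objectid = bg->start;		/* logical start of the group */
	key->type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key->offset = bg->length;		/* length doubles as key offset */
}

Keying the item by (start, length) is also why read_block_group_item() above can take the group's length straight from key->offset.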
@@ -2314,13 +2348,13 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) spin_unlock(&sinfo->lock); } -static int write_one_cache_group(struct btrfs_trans_handle *trans, - struct btrfs_path *path, - struct btrfs_block_group *cache) +static int update_block_group_item(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; - struct btrfs_root *extent_root = fs_info->extent_root; + struct btrfs_root *root = fs_info->extent_root; unsigned long bi; struct extent_buffer *leaf; struct btrfs_block_group_item bgi; @@ -2330,7 +2364,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; key.offset = cache->length; - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1); + ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2642,7 +2676,7 @@ again: } } if (!ret) { - ret = write_one_cache_group(trans, path, cache); + ret = update_block_group_item(trans, path, cache); /* * Our block group might still be attached to the list * of new block groups in the transaction handle of some @@ -2791,7 +2825,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) } } if (!ret) { - ret = write_one_cache_group(trans, path, cache); + ret = update_block_group_item(trans, path, cache); /* * One of the free space endio workers might have * created a new block group while updating a free space @@ -2808,7 +2842,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) if (ret == -ENOENT) { wait_event(cur_trans->writer_wait, atomic_read(&cur_trans->num_writers) == 1); - ret = write_one_cache_group(trans, path, cache); + ret = update_block_group_item(trans, path, cache); } if (ret) btrfs_abort_transaction(trans, ret); @@ -3384,3 +3418,44 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) } return 0; } + +void btrfs_freeze_block_group(struct btrfs_block_group *cache) +{ + atomic_inc(&cache->frozen); +} + +void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) +{ + struct btrfs_fs_info *fs_info = block_group->fs_info; + struct extent_map_tree *em_tree; + struct extent_map *em; + bool cleanup; + + spin_lock(&block_group->lock); + cleanup = (atomic_dec_and_test(&block_group->frozen) && + block_group->removed); + spin_unlock(&block_group->lock); + + if (cleanup) { + mutex_lock(&fs_info->chunk_mutex); + em_tree = &fs_info->mapping_tree; + write_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, block_group->start, + 1); + BUG_ON(!em); /* logic error, can't happen */ + remove_extent_mapping(em_tree, em); + write_unlock(&em_tree->lock); + mutex_unlock(&fs_info->chunk_mutex); + + /* once for us and once for the tree */ + free_extent_map(em); + free_extent_map(em); + + /* + * We may have left one free space entry and other possible + * tasks trimming this block group have left 1 entry each one. + * Free them if any. + */ + __btrfs_remove_free_space_cache(block_group->free_space_ctl); + } +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 107bb557ca8d..b6ee70a039c7 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -129,8 +129,17 @@ struct btrfs_block_group { /* For read-only block groups */ struct list_head ro_list; + /* + * When non-zero it means the block group's logical address and its + * device extents can not be reused for future block group allocations + * until the counter goes down to 0. 
This is to prevent them from being + * reused while some task is still using the block group after it was + * deleted - we want to make sure they can only be reused for new block + * groups after that task is done with the deleted block group. + */ + atomic_t frozen; + /* For discard operations */ - atomic_t trimming; struct list_head discard_list; int discard_index; u64 discard_eligible_time; @@ -283,6 +292,9 @@ static inline int btrfs_block_group_done(struct btrfs_block_group *cache) cache->cached == BTRFS_CACHE_ERROR; } +void btrfs_freeze_block_group(struct btrfs_block_group *cache); +void btrfs_unfreeze_block_group(struct btrfs_block_group *cache); + #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, u64 physical, u64 **logical, int *naddrs, int *stripe_len); diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index 27efec8f7c5b..7e1549a84fcc 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -5,6 +5,7 @@ #include "block-rsv.h" #include "space-info.h" #include "transaction.h" +#include "block-group.h" /* * HOW DO BLOCK RESERVES WORK @@ -405,6 +406,8 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) else block_rsv->full = 0; + if (block_rsv->size >= sinfo->total_bytes) + sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&block_rsv->lock); spin_unlock(&sinfo->lock); } @@ -455,7 +458,7 @@ static struct btrfs_block_rsv *get_block_rsv( struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *block_rsv = NULL; - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || (root == fs_info->csum_root && trans->adding_csums) || (root == fs_info->uuid_root)) block_rsv = trans->block_rsv; diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 27a1fefce508..aeff56a0e105 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -7,6 +7,7 @@ #define BTRFS_INODE_H #include <linux/hash.h> +#include <linux/refcount.h> #include "extent_map.h" #include "extent_io.h" #include "ordered-data.h" @@ -27,7 +28,6 @@ enum { BTRFS_INODE_NEEDS_FULL_SYNC, BTRFS_INODE_COPY_EVERYTHING, BTRFS_INODE_IN_DELALLOC_LIST, - BTRFS_INODE_READDIO_NEED_LOCK, BTRFS_INODE_HAS_PROPS, BTRFS_INODE_SNAPSHOT_FLUSH, }; @@ -293,53 +293,25 @@ static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation) return ret; } -#define BTRFS_DIO_ORIG_BIO_SUBMITTED 0x1 - struct btrfs_dio_private { struct inode *inode; - unsigned long flags; u64 logical_offset; u64 disk_bytenr; u64 bytes; - void *private; - - /* number of bios pending for this dio */ - atomic_t pending_bios; - /* IO errors */ - int errors; - - /* orig_bio is our btrfs_io_bio */ - struct bio *orig_bio; + /* + * References to this structure. There is one reference per in-flight + * bio plus one while we're still setting up. + */ + refcount_t refs; /* dio_bio came from fs/direct-io.c */ struct bio *dio_bio; - /* - * The original bio may be split to several sub-bios, this is - * done during endio of sub-bios - */ - blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *, - blk_status_t); + /* Array of checksums */ + u8 csums[]; }; -/* - * Disable DIO read nolock optimization, so new dio readers will be forced - * to grab i_mutex. It is used to avoid the endless truncate due to - * nonlocked dio read. 
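Returning to the frozen counter documented in block-group.h above: the intended usage is to bracket any trim or scrub of a block group with the new freeze/unfreeze pair, so a concurrently removed group keeps its extent mapping (and thus its logical and physical ranges) reserved until the last user is done. A hedged sketch with a hypothetical caller:

static void discard_group_sketch(struct btrfs_block_group *bg)
{
	btrfs_freeze_block_group(bg);	/* pin the chunk mapping */

	/* ... issue the actual discard/trim or scrub work here ... */

	/*
	 * If the group was removed in the meantime, the last unfreeze also
	 * drops the extent map and any leftover free space entries.
	 */
	btrfs_unfreeze_block_group(bg);
}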
- */ -static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode) -{ - set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); - smp_mb(); -} - -static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode) -{ - smp_mb__before_atomic(); - clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); -} - /* Array of bytes with variable length, hexadecimal format 0x1234 */ #define CSUM_FMT "0x%*phN" #define CSUM_FMT_VALUE(size, bytes) size, bytes diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 9ab610cc9114..c6e648603f85 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -194,11 +194,9 @@ static int check_compressed_csum(struct btrfs_inode *inode, for (i = 0; i < cb->nr_pages; i++) { page = cb->compressed_pages[i]; - crypto_shash_init(shash); kaddr = kmap_atomic(page); - crypto_shash_update(shash, kaddr, PAGE_SIZE); + crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum); kunmap_atomic(kaddr); - crypto_shash_final(shash, (u8 *)&csum); if (memcmp(&csum, cb_sum, csum_size)) { btrfs_print_data_csum_error(inode, disk_start, @@ -1142,6 +1140,22 @@ static void put_workspace(int type, struct list_head *ws) } /* + * Adjust @level according to the limits of the compression algorithm or + * fallback to default + */ +static unsigned int btrfs_compress_set_level(int type, unsigned level) +{ + const struct btrfs_compress_op *ops = btrfs_compress_op[type]; + + if (level == 0) + level = ops->default_level; + else + level = min(level, ops->max_level); + + return level; +} + +/* * Given an address space and start and length, compress the bytes into @pages * that are allocated on demand. * @@ -1748,19 +1762,3 @@ unsigned int btrfs_compress_str2level(unsigned int type, const char *str) return level; } - -/* - * Adjust @level according to the limits of the compression algorithm or - * fallback to default - */ -unsigned int btrfs_compress_set_level(int type, unsigned level) -{ - const struct btrfs_compress_op *ops = btrfs_compress_op[type]; - - if (level == 0) - level = ops->default_level; - else - level = min(level, ops->max_level); - - return level; -} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index d253f7aa8ed5..284a3ad31350 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -140,8 +140,6 @@ extern const struct btrfs_compress_op btrfs_zstd_compress; const char* btrfs_compress_type2str(enum btrfs_compression_type type); bool btrfs_compress_is_valid_type(const char *str, size_t len); -unsigned int btrfs_compress_set_level(int type, unsigned level); - int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end); #endif diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index bfedbbe2311f..3a7648bff42c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -144,9 +144,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) return eb; } -/* cowonly root (everything not a reference counted cow subvolume), just get - * put onto a simple dirty list. transaction.c walks this to make sure they - * get properly updated on disk. +/* + * Cowonly root (not-shareable trees, everything not subvolume or reloc roots), + * just get put onto a simple dirty list. Transaction walks this list to make + * sure they get properly updated on disk. 
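Aside on the checksum rework in the hunks above (check_compressed_csum() here, and btrfs_check_super_csum() later in disk-io.c): the three-step shash sequence is collapsed into one call. For a single contiguous buffer the two forms are equivalent, which is why the conversion is safe:

	/* One-shot form used after this patch: */
	crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);

	/* Equivalent open-coded form used before: */
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr, PAGE_SIZE);
	crypto_shash_final(shash, csum);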
*/ static void add_root_to_dirty_list(struct btrfs_root *root) { @@ -185,9 +186,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, int level; struct btrfs_disk_key disk_key; - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && trans->transid != fs_info->running_transaction->transid); - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && trans->transid != root->last_trans); level = btrfs_header_level(buf); @@ -826,12 +827,11 @@ int btrfs_block_can_be_shared(struct btrfs_root *root, struct extent_buffer *buf) { /* - * Tree blocks not in reference counted trees and tree roots - * are never shared. If a block was allocated after the last - * snapshot and the block was not allocated by tree relocation, - * we know the block is not shared. + * Tree blocks not in shareable trees and tree roots are never shared. + * If a block was allocated after the last snapshot and the block was + * not allocated by tree relocation, we know the block is not shared. */ - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && buf != root->node && buf != root->commit_root && (btrfs_header_generation(buf) <= btrfs_root_last_snapshot(&root->root_item) || @@ -1024,9 +1024,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_assert_tree_locked(buf); - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && trans->transid != fs_info->running_transaction->transid); - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && trans->transid != root->last_trans); level = btrfs_header_level(buf); @@ -1065,7 +1065,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, return ret; } - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { ret = btrfs_reloc_cow_block(trans, root, buf, cow); if (ret) { btrfs_abort_transaction(trans, ret); @@ -1668,15 +1668,8 @@ static noinline int generic_bin_search(struct extent_buffer *eb, { int low = 0; int high = max; - int mid; int ret; - struct btrfs_disk_key *tmp = NULL; - struct btrfs_disk_key unaligned; - unsigned long offset; - char *kaddr = NULL; - unsigned long map_start = 0; - unsigned long map_len = 0; - int err; + const int key_size = sizeof(struct btrfs_disk_key); if (low > high) { btrfs_err(eb->fs_info, @@ -1687,32 +1680,26 @@ static noinline int generic_bin_search(struct extent_buffer *eb, } while (low < high) { + unsigned long oip; + unsigned long offset; + struct btrfs_disk_key *tmp; + struct btrfs_disk_key unaligned; + int mid; + mid = (low + high) / 2; offset = p + mid * item_size; + oip = offset_in_page(offset); - if (!kaddr || offset < map_start || - (offset + sizeof(struct btrfs_disk_key)) > - map_start + map_len) { - - err = map_private_extent_buffer(eb, offset, - sizeof(struct btrfs_disk_key), - &kaddr, &map_start, &map_len); - - if (!err) { - tmp = (struct btrfs_disk_key *)(kaddr + offset - - map_start); - } else if (err == 1) { - read_extent_buffer(eb, &unaligned, - offset, sizeof(unaligned)); - tmp = &unaligned; - } else { - return err; - } + if (oip + key_size <= PAGE_SIZE) { + const unsigned long idx = offset >> PAGE_SHIFT; + char *kaddr = page_address(eb->pages[idx]); + tmp = (struct btrfs_disk_key *)(kaddr + oip); } else { - tmp = (struct btrfs_disk_key *)(kaddr + offset - 
- map_start); + read_extent_buffer(eb, &unaligned, offset, key_size); + tmp = &unaligned; } + ret = comp_keys(tmp, key); if (ret < 0) @@ -1733,9 +1720,9 @@ static noinline int generic_bin_search(struct extent_buffer *eb, * leaves vs nodes */ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, - int level, int *slot) + int *slot) { - if (level == 0) + if (btrfs_header_level(eb) == 0) return generic_bin_search(eb, offsetof(struct btrfs_leaf, items), sizeof(struct btrfs_item), @@ -2348,16 +2335,15 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, struct btrfs_fs_info *fs_info = root->fs_info; u64 blocknr; u64 gen; - struct extent_buffer *b = *eb_ret; struct extent_buffer *tmp; struct btrfs_key first_key; int ret; int parent_level; - blocknr = btrfs_node_blockptr(b, slot); - gen = btrfs_node_ptr_generation(b, slot); - parent_level = btrfs_header_level(b); - btrfs_node_key_to_cpu(b, &first_key, slot); + blocknr = btrfs_node_blockptr(*eb_ret, slot); + gen = btrfs_node_ptr_generation(*eb_ret, slot); + parent_level = btrfs_header_level(*eb_ret); + btrfs_node_key_to_cpu(*eb_ret, &first_key, slot); tmp = find_extent_buffer(fs_info, blocknr); if (tmp) { @@ -2501,19 +2487,6 @@ done: return ret; } -static int key_search(struct extent_buffer *b, const struct btrfs_key *key, - int level, int *prev_cmp, int *slot) -{ - if (*prev_cmp != 0) { - *prev_cmp = btrfs_bin_search(b, key, level, slot); - return *prev_cmp; - } - - *slot = 0; - - return 0; -} - int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, u64 iobjectid, u64 ioff, u8 key_type, struct btrfs_key *found_key) @@ -2783,9 +2756,23 @@ cow_done: } } - ret = key_search(b, key, level, &prev_cmp, &slot); - if (ret < 0) - goto done; + /* + * If btrfs_bin_search returns an exact match (prev_cmp == 0) + * we can safely assume the target key will always be in slot 0 + * on lower levels due to the invariants BTRFS' btree provides, + * namely that a btrfs_key_ptr entry always points to the + * lowest key in the child node, thus we can skip searching + * lower levels + */ + if (prev_cmp == 0) { + slot = 0; + ret = 0; + } else { + ret = btrfs_bin_search(b, key, &slot); + prev_cmp = ret; + if (ret < 0) + goto done; + } if (level == 0) { p->slots[level] = slot; @@ -2909,7 +2896,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, int level; int lowest_unlock = 1; u8 lowest_level = 0; - int prev_cmp = -1; lowest_level = p->lowest_level; WARN_ON(p->nodes[0] != NULL); @@ -2942,12 +2928,7 @@ again: */ btrfs_unlock_up_safe(p, level + 1); - /* - * Since we can unwind ebs we want to do a real search every - * time. 
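The shortcut added to btrfs_search_slot() above rests on the invariant that a node's key pointer always stores the lowest key of the child it references, so an exact match at one level forces slot 0 at every level below. A hedged, purely illustrative check of that invariant (not part of the patch):

static void assert_lowest_key_sketch(struct extent_buffer *parent, int slot,
				     struct extent_buffer *child)
{
	struct btrfs_key ptr_key, first_key;

	btrfs_node_key_to_cpu(parent, &ptr_key, slot);
	if (btrfs_header_level(child) == 0)
		btrfs_item_key_to_cpu(child, &first_key, 0);
	else
		btrfs_node_key_to_cpu(child, &first_key, 0);

	/* Equal by construction; an exact match above means slot 0 below. */
	ASSERT(btrfs_comp_cpu_keys(&ptr_key, &first_key) == 0);
}

btrfs_search_old_slot(), by contrast, keeps doing a full per-level binary search, since tree-mod-log rewinds can hand back older node contents.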
- */ - prev_cmp = -1; - ret = key_search(b, key, level, &prev_cmp, &slot); + ret = btrfs_bin_search(b, key, &slot); if (ret < 0) goto done; @@ -3507,19 +3488,17 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr) { struct btrfs_item *start_item; struct btrfs_item *end_item; - struct btrfs_map_token token; int data_len; int nritems = btrfs_header_nritems(l); int end = min(nritems, start + nr) - 1; if (!nr) return 0; - btrfs_init_map_token(&token, l); start_item = btrfs_item_nr(start); end_item = btrfs_item_nr(end); - data_len = btrfs_token_item_offset(l, start_item, &token) + - btrfs_token_item_size(l, start_item, &token); - data_len = data_len - btrfs_token_item_offset(l, end_item, &token); + data_len = btrfs_item_offset(l, start_item) + + btrfs_item_size(l, start_item); + data_len = data_len - btrfs_item_offset(l, end_item); data_len += sizeof(struct btrfs_item) * nr; WARN_ON(data_len < 0); return data_len; @@ -3650,8 +3629,8 @@ static noinline int __push_leaf_right(struct btrfs_path *path, push_space = BTRFS_LEAF_DATA_SIZE(fs_info); for (i = 0; i < right_nritems; i++) { item = btrfs_item_nr(i); - push_space -= btrfs_token_item_size(right, item, &token); - btrfs_set_token_item_offset(right, item, push_space, &token); + push_space -= btrfs_token_item_size(&token, item); + btrfs_set_token_item_offset(&token, item, push_space); } left_nritems -= push_items; @@ -3859,10 +3838,9 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, item = btrfs_item_nr(i); - ioff = btrfs_token_item_offset(left, item, &token); - btrfs_set_token_item_offset(left, item, - ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size), - &token); + ioff = btrfs_token_item_offset(&token, item); + btrfs_set_token_item_offset(&token, item, + ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); } btrfs_set_header_nritems(left, old_left_nritems + push_items); @@ -3892,9 +3870,8 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, for (i = 0; i < right_nritems; i++) { item = btrfs_item_nr(i); - push_space = push_space - btrfs_token_item_size(right, - item, &token); - btrfs_set_token_item_offset(right, item, push_space, &token); + push_space = push_space - btrfs_token_item_size(&token, item); + btrfs_set_token_item_offset(&token, item, push_space); } btrfs_mark_buffer_dirty(left); @@ -4036,9 +4013,8 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans, struct btrfs_item *item = btrfs_item_nr(i); u32 ioff; - ioff = btrfs_token_item_offset(right, item, &token); - btrfs_set_token_item_offset(right, item, - ioff + rt_data_off, &token); + ioff = btrfs_token_item_offset(&token, item); + btrfs_set_token_item_offset(&token, item, ioff + rt_data_off); } btrfs_set_header_nritems(l, mid); @@ -4541,9 +4517,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) u32 ioff; item = btrfs_item_nr(i); - ioff = btrfs_token_item_offset(leaf, item, &token); - btrfs_set_token_item_offset(leaf, item, - ioff + size_diff, &token); + ioff = btrfs_token_item_offset(&token, item); + btrfs_set_token_item_offset(&token, item, ioff + size_diff); } /* shift the data */ @@ -4640,9 +4615,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size) u32 ioff; item = btrfs_item_nr(i); - ioff = btrfs_token_item_offset(leaf, item, &token); - btrfs_set_token_item_offset(leaf, item, - ioff - data_size, &token); + ioff = btrfs_token_item_offset(&token, item); + btrfs_set_token_item_offset(&token, item, ioff - data_size); } /* 
shift the data */ @@ -4718,9 +4692,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, u32 ioff; item = btrfs_item_nr(i); - ioff = btrfs_token_item_offset(leaf, item, &token); - btrfs_set_token_item_offset(leaf, item, - ioff - total_data, &token); + ioff = btrfs_token_item_offset(&token, item); + btrfs_set_token_item_offset(&token, item, + ioff - total_data); } /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), @@ -4739,10 +4713,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); btrfs_set_item_key(leaf, &disk_key, slot + i); item = btrfs_item_nr(slot + i); - btrfs_set_token_item_offset(leaf, item, - data_end - data_size[i], &token); + btrfs_set_token_item_offset(&token, item, data_end - data_size[i]); data_end -= data_size[i]; - btrfs_set_token_item_size(leaf, item, data_size[i], &token); + btrfs_set_token_item_size(&token, item, data_size[i]); } btrfs_set_header_nritems(leaf, nritems + nr); @@ -4930,9 +4903,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 ioff; item = btrfs_item_nr(i); - ioff = btrfs_token_item_offset(leaf, item, &token); - btrfs_set_token_item_offset(leaf, item, - ioff + dsize, &token); + ioff = btrfs_token_item_offset(&token, item); + btrfs_set_token_item_offset(&token, item, ioff + dsize); } memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), @@ -5103,7 +5075,7 @@ again: while (1) { nritems = btrfs_header_nritems(cur); level = btrfs_header_level(cur); - sret = btrfs_bin_search(cur, min_key, level, &slot); + sret = btrfs_bin_search(cur, min_key, &slot); if (sret < 0) { ret = sret; goto out; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8aa7b9dac405..161533040978 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -28,6 +28,7 @@ #include <linux/dynamic_debug.h> #include <linux/refcount.h> #include <linux/crc32c.h> +#include <linux/iomap.h> #include "extent-io-tree.h" #include "extent_io.h" #include "extent_map.h" @@ -582,6 +583,7 @@ struct btrfs_fs_info { struct btrfs_root *quota_root; struct btrfs_root *uuid_root; struct btrfs_root *free_space_root; + struct btrfs_root *data_reloc_root; /* the log root tree is a directory of all the other log roots */ struct btrfs_root *log_root_tree; @@ -758,7 +760,6 @@ struct btrfs_fs_info { struct btrfs_workqueue *endio_workers; struct btrfs_workqueue *endio_meta_workers; struct btrfs_workqueue *endio_raid56_workers; - struct btrfs_workqueue *endio_repair_workers; struct btrfs_workqueue *rmw_workers; struct btrfs_workqueue *endio_meta_write_workers; struct btrfs_workqueue *endio_write_workers; @@ -970,7 +971,28 @@ enum { * is used to tell us when more checks are required */ BTRFS_ROOT_IN_TRANS_SETUP, - BTRFS_ROOT_REF_COWS, + + /* + * Set if tree blocks of this root can be shared by other roots. + * Only subvolume trees and their reloc trees have this bit set. + * Conflicts with TRACK_DIRTY bit. + * + * This affects two things: + * + * - How balance works + * For shareable roots, we need to use reloc tree and do path + * replacement for balance, and need various pre/post hooks for + * snapshot creation to handle them. + * + * While for non-shareable trees, we just simply do a tree search + * with COW. + * + * - How dirty roots are tracked + * For shareable roots, btrfs_record_root_in_trans() is needed to + * track them, while non-subvolume roots have TRACK_DIRTY bit, they + * don't need to set this manually. 
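In the code paths this patch touches (COW, block reserves, relocation hooks), the two behaviours listed above reduce to a single test; schematically, with the caveat that real callers do more bookkeeping:

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		/*
		 * Subvolume or reloc tree: blocks may be shared, balance
		 * goes through a reloc tree, and the root must be recorded
		 * per transaction via btrfs_record_root_in_trans().
		 */
	} else {
		/*
		 * Non-shareable tree (extent, chunk, csum, log, data reloc,
		 * ...): a plain COW tree search is enough and dirty
		 * tracking, where needed, relies on BTRFS_ROOT_TRACK_DIRTY.
		 */
	}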
+ */ + BTRFS_ROOT_SHAREABLE, BTRFS_ROOT_TRACK_DIRTY, BTRFS_ROOT_IN_RADIX, BTRFS_ROOT_ORPHAN_ITEM_INSERTED, @@ -1056,7 +1078,7 @@ struct btrfs_root { struct btrfs_key defrag_progress; struct btrfs_key defrag_max; - /* the dirty list is only used by non-reference counted roots */ + /* The dirty list is only used by non-shareable roots */ struct list_head dirty_list; struct list_head root_list; @@ -1146,6 +1168,9 @@ struct btrfs_root { /* Record pairs of swapped blocks for qgroup */ struct btrfs_qgroup_swapped_blocks swapped_blocks; + /* Used only by log trees, when logging csum items */ + struct extent_io_tree log_csum_range; + #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS u64 alloc_bytenr; #endif @@ -1341,7 +1366,7 @@ do { \ BTRFS_INODE_ROOT_ITEM_INIT) struct btrfs_map_token { - const struct extent_buffer *eb; + struct extent_buffer *eb; char *kaddr; unsigned long offset; }; @@ -1353,7 +1378,8 @@ static inline void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb) { token->eb = eb; - token->kaddr = NULL; + token->kaddr = page_address(eb->pages[0]); + token->offset = 0; } /* some macros to generate set/get functions for the struct fields. This @@ -1377,15 +1403,14 @@ static inline void btrfs_init_map_token(struct btrfs_map_token *token, sizeof(((type *)0)->member))) #define DECLARE_BTRFS_SETGET_BITS(bits) \ -u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ - const void *ptr, unsigned long off, \ - struct btrfs_map_token *token); \ -void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \ - unsigned long off, u##bits val, \ - struct btrfs_map_token *token); \ +u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ + const void *ptr, unsigned long off); \ +void btrfs_set_token_##bits(struct btrfs_map_token *token, \ + const void *ptr, unsigned long off, \ + u##bits val); \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ const void *ptr, unsigned long off); \ -void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ +void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ unsigned long off, u##bits val); DECLARE_BTRFS_SETGET_BITS(8) @@ -1400,25 +1425,23 @@ static inline u##bits btrfs_##name(const struct extent_buffer *eb, \ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ return btrfs_get_##bits(eb, s, offsetof(type, member)); \ } \ -static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ +static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \ u##bits val) \ { \ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ btrfs_set_##bits(eb, s, offsetof(type, member), val); \ } \ -static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\ - const type *s, \ - struct btrfs_map_token *token) \ +static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \ + const type *s) \ { \ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ - return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ + return btrfs_get_token_##bits(token, s, offsetof(type, member));\ } \ -static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ - type *s, u##bits val, \ - struct btrfs_map_token *token) \ +static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\ + type *s, u##bits val) \ { \ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ - btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ + btrfs_set_token_##bits(token, s, offsetof(type, member), val); \ } #define 
BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ @@ -1428,7 +1451,7 @@ static inline u##bits btrfs_##name(const struct extent_buffer *eb) \ u##bits res = le##bits##_to_cpu(p->member); \ return res; \ } \ -static inline void btrfs_set_##name(struct extent_buffer *eb, \ +static inline void btrfs_set_##name(const struct extent_buffer *eb, \ u##bits val) \ { \ type *p = page_address(eb->pages[0]); \ @@ -1446,7 +1469,7 @@ static inline void btrfs_set_##name(type *s, u##bits val) \ } -static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb, +static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb, struct btrfs_dev_item *s) { BUILD_BUG_ON(sizeof(u64) != @@ -1454,7 +1477,7 @@ static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb, return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes)); } -static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb, +static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb, struct btrfs_dev_item *s, u64 val) { @@ -1558,13 +1581,13 @@ static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); } -static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, +static inline u64 btrfs_stripe_offset_nr(const struct extent_buffer *eb, struct btrfs_chunk *c, int nr) { return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); } -static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, +static inline u64 btrfs_stripe_devid_nr(const struct extent_buffer *eb, struct btrfs_chunk *c, int nr) { return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); @@ -1644,31 +1667,21 @@ BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, chunk_offset, 64); BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); - -static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) -{ - unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); - return (unsigned long)dev + ptr; -} - BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, generation, 64); BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); -BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); - - BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); -static inline void btrfs_tree_block_key(struct extent_buffer *eb, +static inline void btrfs_tree_block_key(const struct extent_buffer *eb, struct btrfs_tree_block_info *item, struct btrfs_disk_key *key) { read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); } -static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, +static inline void btrfs_set_tree_block_key(const struct extent_buffer *eb, struct btrfs_tree_block_info *item, struct btrfs_disk_key *key) { @@ -1706,12 +1719,6 @@ static inline u32 btrfs_extent_inline_ref_size(int type) return 0; } -BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); -BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, - generation, 64); -BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); -BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); - /* struct btrfs_node */ BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); BTRFS_SETGET_FUNCS(key_generation, struct 
btrfs_key_ptr, generation, 64); @@ -1720,7 +1727,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, generation, 64); -static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) +static inline u64 btrfs_node_blockptr(const struct extent_buffer *eb, int nr) { unsigned long ptr; ptr = offsetof(struct btrfs_node, ptrs) + @@ -1728,7 +1735,7 @@ static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); } -static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, +static inline void btrfs_set_node_blockptr(const struct extent_buffer *eb, int nr, u64 val) { unsigned long ptr; @@ -1737,7 +1744,7 @@ static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); } -static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) +static inline u64 btrfs_node_ptr_generation(const struct extent_buffer *eb, int nr) { unsigned long ptr; ptr = offsetof(struct btrfs_node, ptrs) + @@ -1745,7 +1752,7 @@ static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); } -static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, +static inline void btrfs_set_node_ptr_generation(const struct extent_buffer *eb, int nr, u64 val) { unsigned long ptr; @@ -1763,7 +1770,7 @@ static inline unsigned long btrfs_node_key_ptr_offset(int nr) void btrfs_node_key(const struct extent_buffer *eb, struct btrfs_disk_key *disk_key, int nr); -static inline void btrfs_set_node_key(struct extent_buffer *eb, +static inline void btrfs_set_node_key(const struct extent_buffer *eb, struct btrfs_disk_key *disk_key, int nr) { unsigned long ptr; @@ -2498,8 +2505,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_get_block_group_trimming(struct btrfs_block_group *cache); -void btrfs_put_block_group_trimming(struct btrfs_block_group *cache); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); enum btrfs_reserve_flush_enum { @@ -2512,6 +2517,7 @@ enum btrfs_reserve_flush_enum { BTRFS_RESERVE_FLUSH_LIMIT, BTRFS_RESERVE_FLUSH_EVICT, BTRFS_RESERVE_FLUSH_ALL, + BTRFS_RESERVE_FLUSH_ALL_STEAL, }; enum btrfs_flush_state { @@ -2551,7 +2557,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, - int level, int *slot); + int *slot); int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, @@ -2896,10 +2902,9 @@ void btrfs_free_inode(struct inode *inode); int btrfs_drop_inode(struct inode *inode); int __init btrfs_init_cachep(void); void __cold btrfs_destroy_cachep(void); -struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, +struct inode *btrfs_iget_path(struct super_block *s, u64 ino, struct btrfs_root *root, struct btrfs_path *path); -struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root); +struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root); struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, struct page *page, size_t 
pg_offset, u64 start, u64 end); @@ -2929,6 +2934,9 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end); void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, u64 end, int uptodate); extern const struct dentry_operations btrfs_dentry_operations; +ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter); +extern const struct iomap_ops btrfs_dio_iomap_ops; +extern const struct iomap_dio_ops btrfs_dops; /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); @@ -3381,6 +3389,9 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending); int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info); +struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, + u64 bytenr); +int btrfs_should_ignore_reloc_root(struct btrfs_root *root); /* scrub.c */ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d10c7be10f3b..7c6f0bbb54a5 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -358,16 +358,14 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); shash->tfm = fs_info->csum_shash; - crypto_shash_init(shash); /* * The super_block structure does not span the whole * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is * filled with zeros and is included in the checksum. */ - crypto_shash_update(shash, raw_disk_sb + BTRFS_CSUM_SIZE, - BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); - crypto_shash_final(shash, result); + crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE, + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result); if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb))) return 1; @@ -709,9 +707,7 @@ static void end_workqueue_bio(struct bio *bio) else wq = fs_info->endio_write_workers; } else { - if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) - wq = fs_info->endio_repair_workers; - else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; else if (end_io_wq->metadata) wq = fs_info->endio_meta_workers; @@ -980,9 +976,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset, btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info, "page private not zero on page %llu", (unsigned long long)page_offset(page)); - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); + detach_page_private(page); } } @@ -1137,9 +1131,12 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, root->log_transid = 0; root->log_transid_committed = -1; root->last_log_commit = 0; - if (!dummy) + if (!dummy) { extent_io_tree_init(fs_info, &root->dirty_log_pages, IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL); + extent_io_tree_init(fs_info, &root->log_csum_range, + IO_TREE_LOG_CSUM_RANGE, NULL); + } memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); @@ -1277,12 +1274,13 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; /* - * DON'T set REF_COWS for log trees + * DON'T set SHAREABLE bit for log trees. * - * log trees do not get reference counted because they go away - * before a real commit is actually done. 
They do store pointers - * to file data extents, and those reference counts still get - * updated (along with back refs to the log tree). + * Log trees are not exposed to user space thus can't be snapshotted, + * and they go away before a real commit is actually done. + * + * They do store pointers to file data extents, and those reference + * counts still get updated (along with back refs to the log tree). */ leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID, @@ -1420,8 +1418,9 @@ static int btrfs_init_fs_root(struct btrfs_root *root) if (ret) goto fail; - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { - set_bit(BTRFS_ROOT_REF_COWS, &root->state); + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && + root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { + set_bit(BTRFS_ROOT_SHAREABLE, &root->state); btrfs_check_and_init_root_item(&root->root_item); } @@ -1526,6 +1525,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) btrfs_put_root(fs_info->uuid_root); btrfs_put_root(fs_info->free_space_root); btrfs_put_root(fs_info->fs_root); + btrfs_put_root(fs_info->data_reloc_root); btrfs_check_leaked_roots(fs_info); btrfs_extent_buffer_leak_debug_check(fs_info); kfree(fs_info->super_copy); @@ -1535,35 +1535,34 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - bool check_ref) + u64 objectid, bool check_ref) { struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; int ret; - if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) + if (objectid == BTRFS_ROOT_TREE_OBJECTID) return btrfs_grab_root(fs_info->tree_root); - if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID) + if (objectid == BTRFS_EXTENT_TREE_OBJECTID) return btrfs_grab_root(fs_info->extent_root); - if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID) + if (objectid == BTRFS_CHUNK_TREE_OBJECTID) return btrfs_grab_root(fs_info->chunk_root); - if (location->objectid == BTRFS_DEV_TREE_OBJECTID) + if (objectid == BTRFS_DEV_TREE_OBJECTID) return btrfs_grab_root(fs_info->dev_root); - if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) + if (objectid == BTRFS_CSUM_TREE_OBJECTID) return btrfs_grab_root(fs_info->csum_root); - if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) + if (objectid == BTRFS_QUOTA_TREE_OBJECTID) return btrfs_grab_root(fs_info->quota_root) ? fs_info->quota_root : ERR_PTR(-ENOENT); - if (location->objectid == BTRFS_UUID_TREE_OBJECTID) + if (objectid == BTRFS_UUID_TREE_OBJECTID) return btrfs_grab_root(fs_info->uuid_root) ? fs_info->uuid_root : ERR_PTR(-ENOENT); - if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) + if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) return btrfs_grab_root(fs_info->free_space_root) ? 
fs_info->free_space_root : ERR_PTR(-ENOENT); again: - root = btrfs_lookup_fs_root(fs_info, location->objectid); + root = btrfs_lookup_fs_root(fs_info, objectid); if (root) { if (check_ref && btrfs_root_refs(&root->root_item) == 0) { btrfs_put_root(root); @@ -1572,7 +1571,10 @@ again: return root; } - root = btrfs_read_tree_root(fs_info->tree_root, location); + key.objectid = objectid; + key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = (u64)-1; + root = btrfs_read_tree_root(fs_info->tree_root, &key); if (IS_ERR(root)) return root; @@ -1592,7 +1594,7 @@ again: } key.objectid = BTRFS_ORPHAN_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; - key.offset = location->objectid; + key.offset = objectid; ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); btrfs_free_path(path); @@ -1942,7 +1944,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->endio_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers); - btrfs_destroy_workqueue(fs_info->endio_repair_workers); btrfs_destroy_workqueue(fs_info->rmw_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); @@ -1983,6 +1984,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) free_root_extent_buffers(info->quota_root); free_root_extent_buffers(info->uuid_root); free_root_extent_buffers(info->fs_root); + free_root_extent_buffers(info->data_reloc_root); if (free_chunk_root) free_root_extent_buffers(info->chunk_root); free_root_extent_buffers(info->free_space_root); @@ -1995,6 +1997,7 @@ void btrfs_put_root(struct btrfs_root *root) if (refcount_dec_and_test(&root->refs)) { WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); + WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); if (root->anon_dev) free_anon_bdev(root->anon_dev); btrfs_drew_lock_destroy(&root->snapshot_lock); @@ -2145,8 +2148,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, fs_info->endio_raid56_workers = btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, max_active, 4); - fs_info->endio_repair_workers = - btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); fs_info->rmw_workers = btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); fs_info->endio_write_workers = @@ -2170,7 +2171,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, fs_info->flush_workers && fs_info->endio_workers && fs_info->endio_meta_workers && fs_info->endio_meta_write_workers && - fs_info->endio_repair_workers && fs_info->endio_write_workers && fs_info->endio_raid56_workers && fs_info->endio_freespace_worker && fs_info->rmw_workers && fs_info->caching_workers && fs_info->readahead_workers && @@ -2292,6 +2292,19 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info) set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->csum_root = root; + /* + * This tree can share blocks with some other fs tree during relocation + * and we need a proper setup by btrfs_get_fs_root + */ + root = btrfs_get_fs_root(tree_root->fs_info, + BTRFS_DATA_RELOC_TREE_OBJECTID, true); + if (IS_ERR(root)) { + ret = PTR_ERR(root); + goto out; + } + set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); + fs_info->data_reloc_root = root; + location.objectid = BTRFS_QUOTA_TREE_OBJECTID; root = btrfs_read_tree_root(tree_root, &location); if (!IS_ERR(root)) { @@ -2829,7 +2842,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device u64 generation; u64 features; 
u16 csum_type; - struct btrfs_key location; struct btrfs_super_block *disk_super; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *tree_root; @@ -3243,11 +3255,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device } } - location.objectid = BTRFS_FS_TREE_OBJECTID; - location.type = BTRFS_ROOT_ITEM_KEY; - location.offset = 0; - - fs_info->fs_root = btrfs_get_fs_root(fs_info, &location, true); + fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); if (IS_ERR(fs_info->fs_root)) { err = PTR_ERR(fs_info->fs_root); btrfs_warn(fs_info, "failed to read fs tree: %d", err); @@ -3510,10 +3518,9 @@ static int write_dev_supers(struct btrfs_device *device, btrfs_set_super_bytenr(sb, bytenr); - crypto_shash_init(shash); - crypto_shash_update(shash, (const char *)sb + BTRFS_CSUM_SIZE, - BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); - crypto_shash_final(shash, sb->csum); + crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE, + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, + sb->csum); page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index cd629113f61c..bf43245406c4 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -25,7 +25,6 @@ enum btrfs_wq_endio_type { BTRFS_WQ_ENDIO_METADATA, BTRFS_WQ_ENDIO_FREE_SPACE, BTRFS_WQ_ENDIO_RAID56, - BTRFS_WQ_ENDIO_DIO_REPAIR, }; static inline u64 btrfs_sb_offset(int mirror) @@ -67,8 +66,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info); struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *key, - bool check_ref); + u64 objectid, bool check_ref); void btrfs_free_fs_info(struct btrfs_fs_info *fs_info); int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 2bb25d2dc44b..1a8d419d9e1f 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -64,24 +64,15 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; struct inode *inode; - struct btrfs_key key; if (objectid < BTRFS_FIRST_FREE_OBJECTID) return ERR_PTR(-ESTALE); - key.objectid = root_objectid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - - root = btrfs_get_fs_root(fs_info, &key, true); + root = btrfs_get_fs_root(fs_info, root_objectid, true); if (IS_ERR(root)) return ERR_CAST(root); - key.objectid = objectid; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - - inode = btrfs_iget(sb, &key, root); + inode = btrfs_iget(sb, objectid, root); btrfs_put_root(root); if (IS_ERR(inode)) return ERR_CAST(inode); @@ -200,9 +191,7 @@ struct dentry *btrfs_get_parent(struct dentry *child) found_key.offset, 0, 0); } - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root)); + return d_obtain_alias(btrfs_iget(fs_info->sb, key.objectid, root)); fail: btrfs_free_path(path); return ERR_PTR(ret); diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index b4a7bad3e82e..b6561455b3c4 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -44,6 +44,7 @@ enum { IO_TREE_TRANS_DIRTY_PAGES, IO_TREE_ROOT_DIRTY_LOG_PAGES, IO_TREE_INODE_FILE_EXTENT, + IO_TREE_LOG_CSUM_RANGE, IO_TREE_SELFTEST, }; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 54a64d1e18c6..c0bc35f932bf 100644 --- a/fs/btrfs/extent-tree.c +++ 
b/fs/btrfs/extent-tree.c @@ -2114,22 +2114,6 @@ static u64 find_middle(struct rb_root *root) } #endif -static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads) -{ - u64 num_bytes; - - num_bytes = heads * (sizeof(struct btrfs_extent_item) + - sizeof(struct btrfs_extent_inline_ref)); - if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA)) - num_bytes += heads * sizeof(struct btrfs_tree_block_info); - - /* - * We don't ever fill up leaves all the way so multiply by 2 just to be - * closer to what we're really going to want to use. - */ - return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info)); -} - /* * Takes the number of bytes to be csumm'ed and figures out how many leaves it * would require to store the csums for that many bytes. @@ -2442,7 +2426,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, nritems = btrfs_header_nritems(buf); level = btrfs_header_level(buf); - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0) + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) return 0; if (full_backref) @@ -2932,7 +2916,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) &trimmed); list_del_init(&block_group->bg_list); - btrfs_put_block_group_trimming(block_group); + btrfs_unfreeze_block_group(block_group); btrfs_put_block_group(block_group); if (ret) { @@ -3369,6 +3353,7 @@ static struct btrfs_block_group *btrfs_lock_cluster( struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, int delalloc) + __acquires(&cluster->refill_lock) { struct btrfs_block_group *used_bg = NULL; @@ -5501,8 +5486,6 @@ out: */ if (!for_reloc && !root_dropped) btrfs_add_dead_root(root); - if (err && err != -EAGAIN) - btrfs_handle_fs_error(fs_info, err, NULL); return err; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 39e45b8a5031..68c96057ad2d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2333,7 +2333,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, return 0; } -int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num) +int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) { struct btrfs_fs_info *fs_info = eb->fs_info; u64 start = eb->start; @@ -2537,8 +2537,9 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, return 0; } -bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, - struct io_failure_record *failrec, int failed_mirror) +static bool btrfs_check_repairable(struct inode *inode, bool needs_validation, + struct io_failure_record *failrec, + int failed_mirror) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); int num_copies; @@ -2561,7 +2562,7 @@ bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, * a) deliver good data to the caller * b) correct the bad sectors on disk */ - if (failed_bio_pages > 1) { + if (needs_validation) { /* * to fulfill b), we need to know the exact failing sectors, as * we don't want to rewrite any more than the failed ones. 
thus, @@ -2600,94 +2601,115 @@ bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, return true; } - -struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, - struct io_failure_record *failrec, - struct page *page, int pg_offset, int icsum, - bio_end_io_t *endio_func, void *data) +static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct bio *bio; - struct btrfs_io_bio *btrfs_failed_bio; - struct btrfs_io_bio *btrfs_bio; + u64 len = 0; + const u32 blocksize = inode->i_sb->s_blocksize; - bio = btrfs_io_bio_alloc(1); - bio->bi_end_io = endio_func; - bio->bi_iter.bi_sector = failrec->logical >> 9; - bio->bi_iter.bi_size = 0; - bio->bi_private = data; + /* + * If bi_status is BLK_STS_OK, then this was a checksum error, not an + * I/O error. In this case, we already know exactly which sector was + * bad, so we don't need to validate. + */ + if (bio->bi_status == BLK_STS_OK) + return false; - btrfs_failed_bio = btrfs_io_bio(failed_bio); - if (btrfs_failed_bio->csum) { - u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); + /* + * We need to validate each sector individually if the failed I/O was + * for multiple sectors. + * + * There are a few possible bios that can end up here: + * 1. A buffered read bio, which is not cloned. + * 2. A direct I/O read bio, which is cloned. + * 3. A (buffered or direct) repair bio, which is not cloned. + * + * For cloned bios (case 2), we can get the size from + * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get + * it from the bvecs. + */ + if (bio_flagged(bio, BIO_CLONED)) { + if (btrfs_io_bio(bio)->iter.bi_size > blocksize) + return true; + } else { + struct bio_vec *bvec; + int i; - btrfs_bio = btrfs_io_bio(bio); - btrfs_bio->csum = btrfs_bio->csum_inline; - icsum *= csum_size; - memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum, - csum_size); + bio_for_each_bvec_all(bvec, bio, i) { + len += bvec->bv_len; + if (len > blocksize) + return true; + } } - - bio_add_page(bio, page, failrec->len, pg_offset); - - return bio; + return false; } -/* - * This is a generic handler for readpage errors. If other copies exist, read - * those and write back good data to the failed position. 
Does not investigate - * in remapping the failed extent elsewhere, hoping the device will be smart - * enough to do this as needed - */ -static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, - struct page *page, u64 start, u64 end, - int failed_mirror) +blk_status_t btrfs_submit_read_repair(struct inode *inode, + struct bio *failed_bio, u64 phy_offset, + struct page *page, unsigned int pgoff, + u64 start, u64 end, int failed_mirror, + submit_bio_hook_t *submit_bio_hook) { struct io_failure_record *failrec; - struct inode *inode = page->mapping->host; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; - struct bio *bio; - int read_mode = 0; + struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio); + const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits; + bool need_validation; + struct bio *repair_bio; + struct btrfs_io_bio *repair_io_bio; blk_status_t status; int ret; - unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT; + + btrfs_debug(fs_info, + "repair read error: read error at %llu", start); BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); ret = btrfs_get_io_failure_record(inode, start, end, &failrec); if (ret) - return ret; + return errno_to_blk_status(ret); + + need_validation = btrfs_io_needs_validation(inode, failed_bio); - if (!btrfs_check_repairable(inode, failed_bio_pages, failrec, + if (!btrfs_check_repairable(inode, need_validation, failrec, failed_mirror)) { free_io_failure(failure_tree, tree, failrec); - return -EIO; + return BLK_STS_IOERR; } - if (failed_bio_pages > 1) - read_mode |= REQ_FAILFAST_DEV; + repair_bio = btrfs_io_bio_alloc(1); + repair_io_bio = btrfs_io_bio(repair_bio); + repair_bio->bi_opf = REQ_OP_READ; + if (need_validation) + repair_bio->bi_opf |= REQ_FAILFAST_DEV; + repair_bio->bi_end_io = failed_bio->bi_end_io; + repair_bio->bi_iter.bi_sector = failrec->logical >> 9; + repair_bio->bi_private = failed_bio->bi_private; + + if (failed_io_bio->csum) { + const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); - phy_offset >>= inode->i_sb->s_blocksize_bits; - bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, - start - page_offset(page), - (int)phy_offset, failed_bio->bi_end_io, - NULL); - bio->bi_opf = REQ_OP_READ | read_mode; + repair_io_bio->csum = repair_io_bio->csum_inline; + memcpy(repair_io_bio->csum, + failed_io_bio->csum + csum_size * icsum, csum_size); + } + + bio_add_page(repair_bio, page, failrec->len, pgoff); + repair_io_bio->logical = failrec->start; + repair_io_bio->iter = repair_bio->bi_iter; btrfs_debug(btrfs_sb(inode->i_sb), - "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d", - read_mode, failrec->this_mirror, failrec->in_validation); +"repair read error: submitting new read to mirror %d, in_validation=%d", + failrec->this_mirror, failrec->in_validation); - status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror, - failrec->bio_flags); + status = submit_bio_hook(inode, repair_bio, failrec->this_mirror, + failrec->bio_flags); if (status) { free_io_failure(failure_tree, tree, failrec); - bio_put(bio); - ret = blk_status_to_errno(status); + bio_put(repair_bio); } - - return ret; + return status; } /* lots and lots of room for performance fixes in the end_bio funcs */ @@ -2859,9 +2881,10 @@ static void end_bio_extent_readpage(struct bio *bio) * If it can't handle the error it will return -EIO 
and * we remain responsible for that page. */ - ret = bio_readpage_error(bio, offset, page, start, end, - mirror); - if (ret == 0) { + if (!btrfs_submit_read_repair(inode, bio, offset, page, + start - page_offset(page), + start, end, mirror, + tree->ops->submit_bio_hook)) { uptodate = !bio->bi_status; offset += len; continue; @@ -3076,22 +3099,16 @@ static int submit_extent_page(unsigned int opf, static void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page) { - if (!PagePrivate(page)) { - SetPagePrivate(page); - get_page(page); - set_page_private(page, (unsigned long)eb); - } else { + if (!PagePrivate(page)) + attach_page_private(page, eb); + else WARN_ON(page->private != (unsigned long)eb); - } } void set_page_extent_mapped(struct page *page) { - if (!PagePrivate(page)) { - SetPagePrivate(page); - get_page(page); - set_page_private(page, EXTENT_PAGE_PRIVATE); - } + if (!PagePrivate(page)) + attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE); } static struct extent_map * @@ -4367,51 +4384,32 @@ int extent_writepages(struct address_space *mapping, return ret; } -int extent_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages) +void extent_readahead(struct readahead_control *rac) { struct bio *bio = NULL; unsigned long bio_flags = 0; struct page *pagepool[16]; struct extent_map *em_cached = NULL; - int nr = 0; u64 prev_em_start = (u64)-1; + int nr; - while (!list_empty(pages)) { - u64 contig_end = 0; - - for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) { - struct page *page = lru_to_page(pages); - - prefetchw(&page->flags); - list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, page->index, - readahead_gfp_mask(mapping))) { - put_page(page); - break; - } - - pagepool[nr++] = page; - contig_end = page_offset(page) + PAGE_SIZE - 1; - } - - if (nr) { - u64 contig_start = page_offset(pagepool[0]); + while ((nr = readahead_page_batch(rac, pagepool))) { + u64 contig_start = page_offset(pagepool[0]); + u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1; - ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end); + ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end); - contiguous_readpages(pagepool, nr, contig_start, - contig_end, &em_cached, &bio, &bio_flags, - &prev_em_start); - } + contiguous_readpages(pagepool, nr, contig_start, contig_end, + &em_cached, &bio, &bio_flags, &prev_em_start); } if (em_cached) free_extent_map(em_cached); - if (bio) - return submit_one_bio(bio, 0, bio_flags); - return 0; + if (bio) { + if (submit_one_bio(bio, 0, bio_flags)) + return; + } } /* @@ -4887,7 +4885,7 @@ static void __free_extent_buffer(struct extent_buffer *eb) kmem_cache_free(extent_buffer_cache, eb); } -int extent_buffer_under_io(struct extent_buffer *eb) +int extent_buffer_under_io(const struct extent_buffer *eb) { return (atomic_read(&eb->io_pages) || test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || @@ -4929,10 +4927,7 @@ static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) * We need to make sure we haven't be attached * to a new eb. 
*/ - ClearPagePrivate(page); - set_page_private(page, 0); - /* One for the page private */ - put_page(page); + detach_page_private(page); } if (mapped) @@ -4995,7 +4990,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, return eb; } -struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) +struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src) { int i; struct page *p; @@ -5401,7 +5396,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb) release_extent_buffer(eb); } -void clear_extent_buffer_dirty(struct extent_buffer *eb) +void clear_extent_buffer_dirty(const struct extent_buffer *eb) { int i; int num_pages; @@ -5599,8 +5594,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, struct page *page; char *kaddr; char *dst = (char *)dstv; - size_t start_offset = offset_in_page(eb->start); - unsigned long i = (start_offset + start) >> PAGE_SHIFT; + unsigned long i = start >> PAGE_SHIFT; if (start + len > eb->len) { WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n", @@ -5609,7 +5603,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, return; } - offset = offset_in_page(start_offset + start); + offset = offset_in_page(start); while (len > 0) { page = eb->pages[i]; @@ -5634,14 +5628,13 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, struct page *page; char *kaddr; char __user *dst = (char __user *)dstv; - size_t start_offset = offset_in_page(eb->start); - unsigned long i = (start_offset + start) >> PAGE_SHIFT; + unsigned long i = start >> PAGE_SHIFT; int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = offset_in_page(start_offset + start); + offset = offset_in_page(start); while (len > 0) { page = eb->pages[i]; @@ -5662,48 +5655,6 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, return ret; } -/* - * return 0 if the item is found within a page. - * return 1 if the item spans two pages. - * return -EINVAL otherwise. 
- */ -int map_private_extent_buffer(const struct extent_buffer *eb, - unsigned long start, unsigned long min_len, - char **map, unsigned long *map_start, - unsigned long *map_len) -{ - size_t offset; - char *kaddr; - struct page *p; - size_t start_offset = offset_in_page(eb->start); - unsigned long i = (start_offset + start) >> PAGE_SHIFT; - unsigned long end_i = (start_offset + start + min_len - 1) >> - PAGE_SHIFT; - - if (start + min_len > eb->len) { - WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n", - eb->start, eb->len, start, min_len); - return -EINVAL; - } - - if (i != end_i) - return 1; - - if (i == 0) { - offset = start_offset; - *map_start = 0; - } else { - offset = 0; - *map_start = ((u64)i << PAGE_SHIFT) - start_offset; - } - - p = eb->pages[i]; - kaddr = page_address(p); - *map = kaddr + offset; - *map_len = PAGE_SIZE - offset; - return 0; -} - int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, unsigned long start, unsigned long len) { @@ -5712,14 +5663,13 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, struct page *page; char *kaddr; char *ptr = (char *)ptrv; - size_t start_offset = offset_in_page(eb->start); - unsigned long i = (start_offset + start) >> PAGE_SHIFT; + unsigned long i = start >> PAGE_SHIFT; int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = offset_in_page(start_offset + start); + offset = offset_in_page(start); while (len > 0) { page = eb->pages[i]; @@ -5739,7 +5689,7 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, return ret; } -void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, +void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, const void *srcv) { char *kaddr; @@ -5750,7 +5700,7 @@ void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, BTRFS_FSID_SIZE); } -void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv) +void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv) { char *kaddr; @@ -5760,7 +5710,7 @@ void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv) BTRFS_FSID_SIZE); } -void write_extent_buffer(struct extent_buffer *eb, const void *srcv, +void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, unsigned long start, unsigned long len) { size_t cur; @@ -5768,13 +5718,12 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv, struct page *page; char *kaddr; char *src = (char *)srcv; - size_t start_offset = offset_in_page(eb->start); - unsigned long i = (start_offset + start) >> PAGE_SHIFT; + unsigned long i = start >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = offset_in_page(start_offset + start); + offset = offset_in_page(start); while (len > 0) { page = eb->pages[i]; @@ -5791,20 +5740,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv, } } -void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, +void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, unsigned long len) { size_t cur; size_t offset; struct page *page; char *kaddr; - size_t start_offset = offset_in_page(eb->start); - unsigned long i = (start_offset + start) >> PAGE_SHIFT; + unsigned long i = start >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = offset_in_page(start_offset + start); + offset = offset_in_page(start); while (len > 0) 
{ page = eb->pages[i]; @@ -5820,8 +5768,8 @@ void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, } } -void copy_extent_buffer_full(struct extent_buffer *dst, - struct extent_buffer *src) +void copy_extent_buffer_full(const struct extent_buffer *dst, + const struct extent_buffer *src) { int i; int num_pages; @@ -5834,7 +5782,8 @@ void copy_extent_buffer_full(struct extent_buffer *dst, page_address(src->pages[i])); } -void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, +void copy_extent_buffer(const struct extent_buffer *dst, + const struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, unsigned long len) { @@ -5843,12 +5792,11 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, size_t offset; struct page *page; char *kaddr; - size_t start_offset = offset_in_page(dst->start); - unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT; + unsigned long i = dst_offset >> PAGE_SHIFT; WARN_ON(src->len != dst_len); - offset = offset_in_page(start_offset + dst_offset); + offset = offset_in_page(dst_offset); while (len > 0) { page = dst->pages[i]; @@ -5879,12 +5827,11 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, * This helper hides the ugliness of finding the byte in an extent buffer which * contains a given bit. */ -static inline void eb_bitmap_offset(struct extent_buffer *eb, +static inline void eb_bitmap_offset(const struct extent_buffer *eb, unsigned long start, unsigned long nr, unsigned long *page_index, size_t *page_offset) { - size_t start_offset = offset_in_page(eb->start); size_t byte_offset = BIT_BYTE(nr); size_t offset; @@ -5893,7 +5840,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, * the bitmap item in the extent buffer + the offset of the byte in the * bitmap item. 
*/ - offset = start_offset + start + byte_offset; + offset = start + byte_offset; *page_index = offset >> PAGE_SHIFT; *page_offset = offset_in_page(offset); @@ -5905,7 +5852,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, * @start: offset of the bitmap item in the extent buffer * @nr: bit number to test */ -int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, +int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, unsigned long nr) { u8 *kaddr; @@ -5927,7 +5874,7 @@ int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, * @pos: bit number of the first bit * @len: number of bits to set */ -void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, +void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len) { u8 *kaddr; @@ -5969,8 +5916,9 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, * @pos: bit number of the first bit * @len: number of bits to clear */ -void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, - unsigned long pos, unsigned long len) +void extent_buffer_bitmap_clear(const struct extent_buffer *eb, + unsigned long start, unsigned long pos, + unsigned long len) { u8 *kaddr; struct page *page; @@ -6031,14 +5979,14 @@ static void copy_pages(struct page *dst_page, struct page *src_page, memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); } -void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, - unsigned long src_offset, unsigned long len) +void memcpy_extent_buffer(const struct extent_buffer *dst, + unsigned long dst_offset, unsigned long src_offset, + unsigned long len) { struct btrfs_fs_info *fs_info = dst->fs_info; size_t cur; size_t dst_off_in_page; size_t src_off_in_page; - size_t start_offset = offset_in_page(dst->start); unsigned long dst_i; unsigned long src_i; @@ -6056,11 +6004,11 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, } while (len > 0) { - dst_off_in_page = offset_in_page(start_offset + dst_offset); - src_off_in_page = offset_in_page(start_offset + src_offset); + dst_off_in_page = offset_in_page(dst_offset); + src_off_in_page = offset_in_page(src_offset); - dst_i = (start_offset + dst_offset) >> PAGE_SHIFT; - src_i = (start_offset + src_offset) >> PAGE_SHIFT; + dst_i = dst_offset >> PAGE_SHIFT; + src_i = src_offset >> PAGE_SHIFT; cur = min(len, (unsigned long)(PAGE_SIZE - src_off_in_page)); @@ -6076,8 +6024,9 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, } } -void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, - unsigned long src_offset, unsigned long len) +void memmove_extent_buffer(const struct extent_buffer *dst, + unsigned long dst_offset, unsigned long src_offset, + unsigned long len) { struct btrfs_fs_info *fs_info = dst->fs_info; size_t cur; @@ -6085,7 +6034,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, size_t src_off_in_page; unsigned long dst_end = dst_offset + len - 1; unsigned long src_end = src_offset + len - 1; - size_t start_offset = offset_in_page(dst->start); unsigned long dst_i; unsigned long src_i; @@ -6106,11 +6054,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, return; } while (len > 0) { - dst_i = (start_offset + dst_end) >> PAGE_SHIFT; - src_i = (start_offset + src_end) >> PAGE_SHIFT; + dst_i = dst_end >> PAGE_SHIFT; + src_i 
= src_end >> PAGE_SHIFT; - dst_off_in_page = offset_in_page(start_offset + dst_end); - src_off_in_page = offset_in_page(start_offset + src_end); + dst_off_in_page = offset_in_page(dst_end); + src_off_in_page = offset_in_page(src_end); cur = min_t(unsigned long, len, src_off_in_page + 1); cur = min(cur, dst_off_in_page + 1); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 2ed65bd0760e..87f60a48f750 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -5,6 +5,7 @@ #include <linux/rbtree.h> #include <linux/refcount.h> +#include <linux/fiemap.h> #include "ulist.h" /* @@ -66,6 +67,10 @@ struct btrfs_io_bio; struct io_failure_record; struct extent_io_tree; +typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio, + int mirror_num, + unsigned long bio_flags); + typedef blk_status_t (extent_submit_bio_start_t)(void *private_data, struct bio *bio, u64 bio_offset); @@ -74,8 +79,7 @@ struct extent_io_ops { * The following callbacks must be always defined, the function * pointer will be called unconditionally. */ - blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio, - int mirror_num, unsigned long bio_flags); + submit_bio_hook_t *submit_bio_hook; int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, struct page *page, u64 start, u64 end, int mirror); @@ -198,8 +202,7 @@ int extent_writepages(struct address_space *mapping, struct writeback_control *wbc); int btree_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc); -int extent_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages); +void extent_readahead(struct readahead_control *rac); int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len); void set_page_extent_mapped(struct page *page); @@ -210,7 +213,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, unsigned long len); struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); -struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); +struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src); struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); void free_extent_buffer(struct extent_buffer *eb); @@ -228,7 +231,7 @@ static inline int num_extent_pages(const struct extent_buffer *eb) (eb->start >> PAGE_SHIFT); } -static inline int extent_buffer_uptodate(struct extent_buffer *eb) +static inline int extent_buffer_uptodate(const struct extent_buffer *eb) { return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); } @@ -241,37 +244,37 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dst, int read_extent_buffer_to_user(const struct extent_buffer *eb, void __user *dst, unsigned long start, unsigned long len); -void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src); -void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, +void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src); +void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, const void *src); -void write_extent_buffer(struct extent_buffer *eb, const void *src, +void write_extent_buffer(const struct extent_buffer *eb, const void *src, unsigned long start, unsigned long len); -void copy_extent_buffer_full(struct extent_buffer *dst, - struct extent_buffer *src); -void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, 
+void copy_extent_buffer_full(const struct extent_buffer *dst, + const struct extent_buffer *src); +void copy_extent_buffer(const struct extent_buffer *dst, + const struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, unsigned long len); -void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, - unsigned long src_offset, unsigned long len); -void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, - unsigned long src_offset, unsigned long len); -void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, +void memcpy_extent_buffer(const struct extent_buffer *dst, + unsigned long dst_offset, unsigned long src_offset, + unsigned long len); +void memmove_extent_buffer(const struct extent_buffer *dst, + unsigned long dst_offset, unsigned long src_offset, + unsigned long len); +void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, unsigned long len); -int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, +int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, unsigned long pos); -void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, +void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len); -void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, - unsigned long pos, unsigned long len); -void clear_extent_buffer_dirty(struct extent_buffer *eb); +void extent_buffer_bitmap_clear(const struct extent_buffer *eb, + unsigned long start, unsigned long pos, + unsigned long len); +void clear_extent_buffer_dirty(const struct extent_buffer *eb); bool set_extent_buffer_dirty(struct extent_buffer *eb); void set_extent_buffer_uptodate(struct extent_buffer *eb); void clear_extent_buffer_uptodate(struct extent_buffer *eb); -int extent_buffer_under_io(struct extent_buffer *eb); -int map_private_extent_buffer(const struct extent_buffer *eb, - unsigned long offset, unsigned long min_len, - char **map, unsigned long *map_start, - unsigned long *map_len); +int extent_buffer_under_io(const struct extent_buffer *eb); void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, @@ -290,7 +293,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, u64 length, u64 logical, struct page *page, unsigned int pg_offset, int mirror_num); void end_extent_writepage(struct page *page, int err, u64 start, u64 end); -int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num); +int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num); /* * When IO fails, either with EIO or csum verification fails, we @@ -312,12 +315,12 @@ struct io_failure_record { }; -bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, - struct io_failure_record *failrec, int fail_mirror); -struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, - struct io_failure_record *failrec, - struct page *page, int pg_offset, int icsum, - bio_end_io_t *endio_func, void *data); +blk_status_t btrfs_submit_read_repair(struct inode *inode, + struct bio *failed_bio, u64 phy_offset, + struct page *page, unsigned int pgoff, + u64 start, u64 end, int failed_mirror, + submit_bio_hook_t *submit_bio_hook); + #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 
bool find_lock_delalloc_range(struct inode *inode, struct page *locked_page, u64 *start, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b618ad5339ba..706a3128e192 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -242,11 +242,13 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, /** * btrfs_lookup_bio_sums - Look up checksums for a bio. * @inode: inode that the bio is for. - * @bio: bio embedded in btrfs_io_bio. + * @bio: bio to look up. * @offset: Unless (u64)-1, look up checksums for this offset in the file. * If (u64)-1, use the page offsets from the bio instead. - * @dst: Buffer of size btrfs_super_csum_size() used to return checksum. If - * NULL, the checksum is returned in btrfs_io_bio(bio)->csum instead. + * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return + * checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If + * NULL, the checksum buffer is allocated and returned in + * btrfs_io_bio(bio)->csum instead. * * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise. */ @@ -256,7 +258,6 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct bio_vec bvec; struct bvec_iter iter; - struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); struct btrfs_csum_item *item = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_path *path; @@ -277,6 +278,8 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; if (!dst) { + struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); + if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { btrfs_bio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS); @@ -598,13 +601,12 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, index = 0; } - crypto_shash_init(shash); data = kmap_atomic(bvec.bv_page); - crypto_shash_update(shash, data + bvec.bv_offset + crypto_shash_digest(shash, data + bvec.bv_offset + (i * fs_info->sectorsize), - fs_info->sectorsize); + fs_info->sectorsize, + sums->sums + index); kunmap_atomic(data); - crypto_shash_final(shash, (char *)(sums->sums + index)); index += csum_size; offset += fs_info->sectorsize; this_sum_bytes += fs_info->sectorsize; @@ -869,7 +871,7 @@ again: } ret = PTR_ERR(item); if (ret != -EFBIG && ret != -ENOENT) - goto fail_unlock; + goto out; if (ret == -EFBIG) { u32 item_size; @@ -887,10 +889,12 @@ again: nritems = btrfs_header_nritems(path->nodes[0]); if (!nritems || (path->slots[0] >= nritems - 1)) { ret = btrfs_next_leaf(root, path); - if (ret == 1) + if (ret < 0) { + goto out; + } else if (ret > 0) { found_next = 1; - if (ret != 0) goto insert; + } slot = path->slots[0]; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); @@ -905,14 +909,27 @@ again: } /* - * at this point, we know the tree has an item, but it isn't big - * enough yet to put our csum in. Grow it + * At this point, we know the tree has a checksum item that ends at an + * offset matching the start of the checksum range we want to insert. + * We try to extend that item as much as possible and then add as many + * checksums to it as they fit. + * + * First check if the leaf has enough free space for at least one + * checksum. If it has go directly to the item extension code, otherwise + * release the path and do a search for insertion before the extension. 
*/ + if (btrfs_leaf_free_space(leaf) >= csum_size) { + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + csum_offset = (bytenr - found_key.offset) >> + fs_info->sb->s_blocksize_bits; + goto extend_csum; + } + btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1); if (ret < 0) - goto fail_unlock; + goto out; if (ret > 0) { if (path->slots[0] == 0) @@ -931,19 +948,13 @@ again: goto insert; } +extend_csum: if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) / csum_size) { int extend_nr; u64 tmp; u32 diff; - u32 free_space; - - if (btrfs_leaf_free_space(leaf) < - sizeof(struct btrfs_item) + csum_size * 2) - goto insert; - free_space = btrfs_leaf_free_space(leaf) - - sizeof(struct btrfs_item) - csum_size; tmp = sums->len - total_bytes; tmp >>= fs_info->sb->s_blocksize_bits; WARN_ON(tmp < 1); @@ -954,7 +965,7 @@ again: MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); - diff = min(free_space, diff); + diff = min_t(u32, btrfs_leaf_free_space(leaf), diff); diff /= csum_size; diff *= csum_size; @@ -985,9 +996,9 @@ insert: ins_size); path->leave_spinning = 0; if (ret < 0) - goto fail_unlock; + goto out; if (WARN_ON(ret != 0)) - goto fail_unlock; + goto out; leaf = path->nodes[0]; csum: item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); @@ -1017,9 +1028,6 @@ found: out: btrfs_free_path(path); return ret; - -fail_unlock: - goto out; } void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 719e68ab552c..fde125616687 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -275,26 +275,18 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, { struct btrfs_root *inode_root; struct inode *inode; - struct btrfs_key key; struct btrfs_ioctl_defrag_range_args range; int num_defrag; int ret; /* get the inode */ - key.objectid = defrag->root; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - - inode_root = btrfs_get_fs_root(fs_info, &key, true); + inode_root = btrfs_get_fs_root(fs_info, defrag->root, true); if (IS_ERR(inode_root)) { ret = PTR_ERR(inode_root); goto cleanup; } - key.objectid = defrag->ino; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, inode_root); + inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root); btrfs_put_root(inode_root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); @@ -775,7 +767,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) modify_tree = 0; - update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || + update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || root == fs_info->tree_root); while (1) { recow = 0; @@ -1817,21 +1809,61 @@ again: return num_written ? 
num_written : ret; } -static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) +static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, + const struct iov_iter *iter, loff_t offset) +{ + const unsigned int blocksize_mask = fs_info->sectorsize - 1; + + if (offset & blocksize_mask) + return -EINVAL; + + if (iov_iter_alignment(iter) & blocksize_mask) + return -EINVAL; + + return 0; +} + +static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); - loff_t pos; - ssize_t written; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + loff_t pos = iocb->ki_pos; + ssize_t written = 0; ssize_t written_buffered; loff_t endbyte; int err; + size_t count = 0; + bool relock = false; - written = generic_file_direct_write(iocb, from); + if (check_direct_IO(fs_info, from, pos)) + goto buffered; + + count = iov_iter_count(from); + /* + * If the write DIO is beyond the EOF, we need to update the isize, but it + * is protected by i_mutex. So we cannot unlock the i_mutex in this + * case. + */ + if (pos + count <= inode->i_size) { + inode_unlock(inode); + relock = true; + } else if (iocb->ki_flags & IOCB_NOWAIT) { + return -EAGAIN; + } + + down_read(&BTRFS_I(inode)->dio_sem); + written = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dops, + is_sync_kiocb(iocb)); + up_read(&BTRFS_I(inode)->dio_sem); + + if (relock) + inode_lock(inode); if (written < 0 || !iov_iter_count(from)) return written; +buffered: pos = iocb->ki_pos; written_buffered = btrfs_buffered_write(iocb, from); if (written_buffered < 0) { @@ -1970,7 +2002,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, atomic_inc(&BTRFS_I(inode)->sync_writers); if (iocb->ki_flags & IOCB_DIRECT) { - num_written = __btrfs_direct_write(iocb, from); + num_written = btrfs_direct_write(iocb, from); } else { num_written = btrfs_buffered_write(iocb, from); if (num_written > 0) @@ -3484,9 +3516,54 @@ static int btrfs_file_open(struct inode *inode, struct file *filp) return generic_file_open(inode, filp); } +static int check_direct_read(struct btrfs_fs_info *fs_info, + const struct iov_iter *iter, loff_t offset) +{ + int ret; + int i, seg; + + ret = check_direct_IO(fs_info, iter, offset); + if (ret < 0) + return ret; + + for (seg = 0; seg < iter->nr_segs; seg++) + for (i = seg + 1; i < iter->nr_segs; i++) + if (iter->iov[seg].iov_base == iter->iov[i].iov_base) + return -EINVAL; + return 0; +} + +static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to) +{ + struct inode *inode = file_inode(iocb->ki_filp); + ssize_t ret; + + if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos)) + return 0; + + inode_lock_shared(inode); + ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dops, + is_sync_kiocb(iocb)); + inode_unlock_shared(inode); + return ret; +} + +static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + ssize_t ret = 0; + + if (iocb->ki_flags & IOCB_DIRECT) { + ret = btrfs_direct_read(iocb, to); + if (ret < 0) + return ret; + } + + return generic_file_buffered_read(iocb, to, ret); +} + const struct file_operations btrfs_file_operations = { .llseek = btrfs_file_llseek, - .read_iter = generic_file_read_iter, + .read_iter = btrfs_file_read_iter, .splice_read = generic_file_splice_read, .write_iter = btrfs_file_write_iter, .mmap = btrfs_file_mmap, diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 3613da065a73..55955bd424d7 100644 ---
a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -82,7 +82,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, * sure NOFS is set to keep us from deadlocking. */ nofs_flag = memalloc_nofs_save(); - inode = btrfs_iget_path(fs_info->sb, &location, root, path); + inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path); btrfs_release_path(path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(inode)) @@ -1190,13 +1190,10 @@ out: if (ret) { invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; - if (block_group) { -#ifdef CONFIG_BTRFS_DEBUG - btrfs_err(root->fs_info, - "failed to write free space cache for block group %llu", - block_group->start); -#endif - } + if (block_group) + btrfs_debug(root->fs_info, + "failed to write free space cache for block group %llu error %d", + block_group->start, ret); } btrfs_update_inode(trans, root, inode); @@ -1415,11 +1412,9 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans, ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, block_group, &block_group->io_ctl, trans); if (ret) { -#ifdef CONFIG_BTRFS_DEBUG - btrfs_err(fs_info, - "failed to write free space cache for block group %llu", - block_group->start); -#endif + btrfs_debug(fs_info, + "failed to write free space cache for block group %llu error %d", + block_group->start, ret); spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&block_group->lock); @@ -3762,46 +3757,6 @@ out: return ret; } -void btrfs_get_block_group_trimming(struct btrfs_block_group *cache) -{ - atomic_inc(&cache->trimming); -} - -void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group) -{ - struct btrfs_fs_info *fs_info = block_group->fs_info; - struct extent_map_tree *em_tree; - struct extent_map *em; - bool cleanup; - - spin_lock(&block_group->lock); - cleanup = (atomic_dec_and_test(&block_group->trimming) && - block_group->removed); - spin_unlock(&block_group->lock); - - if (cleanup) { - mutex_lock(&fs_info->chunk_mutex); - em_tree = &fs_info->mapping_tree; - write_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, block_group->start, - 1); - BUG_ON(!em); /* logic error, can't happen */ - remove_extent_mapping(em_tree, em); - write_unlock(&em_tree->lock); - mutex_unlock(&fs_info->chunk_mutex); - - /* once for us and once for the tree */ - free_extent_map(em); - free_extent_map(em); - - /* - * We've left one free space entry and other tasks trimming - * this block group have left 1 entry each one. Free them. 
- */ - __btrfs_remove_free_space_cache(block_group->free_space_ctl); - } -} - int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { @@ -3816,7 +3771,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group, spin_unlock(&block_group->lock); return 0; } - btrfs_get_block_group_trimming(block_group); + btrfs_freeze_block_group(block_group); spin_unlock(&block_group->lock); ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false); @@ -3829,7 +3784,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group, if (rem) reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end)); out: - btrfs_put_block_group_trimming(block_group); + btrfs_unfreeze_block_group(block_group); return ret; } @@ -3846,11 +3801,11 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, spin_unlock(&block_group->lock); return 0; } - btrfs_get_block_group_trimming(block_group); + btrfs_freeze_block_group(block_group); spin_unlock(&block_group->lock); ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async); - btrfs_put_block_group_trimming(block_group); + btrfs_unfreeze_block_group(block_group); return ret; } @@ -3868,13 +3823,13 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, spin_unlock(&block_group->lock); return 0; } - btrfs_get_block_group_trimming(block_group); + btrfs_freeze_block_group(block_group); spin_unlock(&block_group->lock); ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen, async); - btrfs_put_block_group_trimming(block_group); + btrfs_unfreeze_block_group(block_group); return ret; } @@ -4035,11 +3990,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, if (release_metadata) btrfs_delalloc_release_metadata(BTRFS_I(inode), inode->i_size, true); -#ifdef CONFIG_BTRFS_DEBUG - btrfs_err(fs_info, - "failed to write free ino cache for root %llu", - root->root_key.objectid); -#endif + btrfs_debug(fs_info, + "failed to write free ino cache for root %llu error %d", + root->root_key.objectid, ret); } return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 320d1062068d..31ac8c682f19 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5,7 +5,6 @@ #include <linux/kernel.h> #include <linux/bio.h> -#include <linux/buffer_head.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/pagemap.h> @@ -49,17 +48,18 @@ #include "qgroup.h" #include "delalloc-space.h" #include "block-group.h" +#include "space-info.h" struct btrfs_iget_args { - struct btrfs_key *location; + u64 ino; struct btrfs_root *root; }; struct btrfs_dio_data { u64 reserve; - u64 unsubmitted_oe_range_start; - u64 unsubmitted_oe_range_end; - int overwrite; + loff_t length; + ssize_t submitted; + struct extent_changeset *data_reserved; }; static const struct inode_operations btrfs_dir_inode_operations; @@ -1142,7 +1142,7 @@ out_unlock: */ if (extent_reserved) { extent_clear_unlock_delalloc(inode, start, - start + cur_alloc_size, + start + cur_alloc_size - 1, locked_page, clear_bits, page_ops); @@ -1355,6 +1355,66 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, return 1; } +static int fallback_to_cow(struct inode *inode, struct page *locked_page, + const u64 start, const u64 end, + int *page_started, unsigned long *nr_written) +{ + const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode)); + const u64 range_bytes = end + 1 - start; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + u64 range_start = start; + u64 
count; + + /* + * If EXTENT_NORESERVE is set it means that when the buffered write was + * made we did not have enough available data space and therefore we did not + * reserve data space for it, since we thought we could do NOCOW for the + * respective file range (either there is a prealloc extent or the inode + * has the NOCOW bit set). + * + * However, when we need to fall back to COW mode (because for example the + * block group for the corresponding extent was turned to RO mode by a + * scrub or relocation) we need to do the following: + * + * 1) We increment the bytes_may_use counter of the data space info. + * If COW succeeds, it allocates a new data extent and after doing + * that it decrements the space info's bytes_may_use counter and + * increments its bytes_reserved counter by the same amount (we do + * this at btrfs_add_reserved_bytes()). So we need to increment the + * bytes_may_use counter to compensate (when space is reserved at + * buffered write time, the bytes_may_use counter is incremented); + * + * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so + * that if the COW path fails for any reason, it decrements (through + * extent_clear_unlock_delalloc()) the bytes_may_use counter of the + * data space info, which we incremented in the step above. + * + * If we need to fall back to COW and the inode corresponds to a free + * space cache inode, we must also increment bytes_may_use of the data + * space_info for the same reason. Space caches always get a prealloc + * extent for them; however, scrub or balance may have set the block + * group that contains that extent to RO mode. + */ + count = count_range_bits(io_tree, &range_start, end, range_bytes, + EXTENT_NORESERVE, 0); + if (count > 0 || is_space_ino) { + const u64 bytes = is_space_ino ? range_bytes : count; + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_space_info *sinfo = fs_info->data_sinfo; + + spin_lock(&sinfo->lock); + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); + spin_unlock(&sinfo->lock); + + if (count > 0) + clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, + 0, 0, NULL); + } + + return cow_file_range(inode, locked_page, start, end, page_started, + nr_written, 1); +} + /* * when nocow writeback calls back. This checks for snapshots or COW copies * of the extents that exist in the file, and COWs the file as required. 
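The fallback_to_cow() comment above packs a fair amount of accounting into prose. The sketch below is a minimal userspace model of the bytes_may_use compensation it describes, not btrfs code: every identifier in it (space_info_model, fallback_to_cow_compensate, cow_allocate) is hypothetical, and in the kernel the corresponding transitions happen via btrfs_space_info_update_bytes_may_use() and btrfs_add_reserved_bytes() as noted in the comment.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the data space_info counters. */
struct space_info_model {
	uint64_t bytes_may_use;		/* reserved at buffered write time */
	uint64_t bytes_reserved;	/* owned by allocated extents */
};

/*
 * A NOCOW buffered write reserved nothing (EXTENT_NORESERVE). Before the
 * range can be redirected to the COW path, the reservation has to be put
 * back, which is what step 1) of the comment above does.
 */
static void fallback_to_cow_compensate(struct space_info_model *s, uint64_t len)
{
	s->bytes_may_use += len;
}

/* COW allocation: bytes_may_use is converted into bytes_reserved. */
static void cow_allocate(struct space_info_model *s, uint64_t len)
{
	assert(s->bytes_may_use >= len);	/* would underflow without the compensation */
	s->bytes_may_use -= len;
	s->bytes_reserved += len;
}

int main(void)
{
	struct space_info_model s = { 0 };

	/* Block group went read-only after a 4K NOCOW buffered write. */
	fallback_to_cow_compensate(&s, 4096);
	cow_allocate(&s, 4096);
	printf("may_use=%llu reserved=%llu\n",
	       (unsigned long long)s.bytes_may_use,
	       (unsigned long long)s.bytes_reserved);
	return 0;
}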
@@ -1602,9 +1662,9 @@ out_check: * NOCOW, following one which needs to be COW'ed */ if (cow_start != (u64)-1) { - ret = cow_file_range(inode, locked_page, - cow_start, found_key.offset - 1, - page_started, nr_written, 1); + ret = fallback_to_cow(inode, locked_page, cow_start, + found_key.offset - 1, + page_started, nr_written); if (ret) { if (nocow) btrfs_dec_nocow_writers(fs_info, @@ -1693,8 +1753,8 @@ out_check: if (cow_start != (u64)-1) { cur_offset = end; - ret = cow_file_range(inode, locked_page, cow_start, end, - page_started, nr_written, 1); + ret = fallback_to_cow(inode, locked_page, cow_start, end, + page_started, nr_written); if (ret) goto error; } @@ -2726,10 +2786,9 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, btrfs_queue_work(wq, &ordered_extent->work); } -static int __readpage_endio_check(struct inode *inode, - struct btrfs_io_bio *io_bio, - int icsum, struct page *page, - int pgoff, u64 start, size_t len) +static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio, + int icsum, struct page *page, int pgoff, u64 start, + size_t len) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); @@ -2743,9 +2802,7 @@ static int __readpage_endio_check(struct inode *inode, kaddr = kmap_atomic(page); shash->tfm = fs_info->csum_shash; - crypto_shash_init(shash); - crypto_shash_update(shash, kaddr + pgoff, len); - crypto_shash_final(shash, csum); + crypto_shash_digest(shash, kaddr + pgoff, len, csum); if (memcmp(csum, csum_expected, csum_size)) goto zeroit; @@ -2790,8 +2847,8 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, } phy_offset >>= inode->i_sb->s_blocksize_bits; - return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, - start, (size_t)(end - start + 1)); + return check_data_csum(inode, io_bio, phy_offset, page, offset, start, + (size_t)(end - start + 1)); } /* @@ -2981,7 +3038,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) found_key.objectid = found_key.offset; found_key.type = BTRFS_INODE_ITEM_KEY; found_key.offset = 0; - inode = btrfs_iget(fs_info->sb, &found_key, root); + inode = btrfs_iget(fs_info->sb, last_objectid, root); ret = PTR_ERR_OR_ZERO(inode); if (ret && ret != -ENOENT) goto out; @@ -3000,18 +3057,16 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) * orphan must not get deleted. * find_dead_roots already ran before us, so if this * is a snapshot deletion, we should find the root - * in the dead_roots list + * in the fs_roots radix tree. 
*/ - spin_lock(&fs_info->trans_lock); - list_for_each_entry(dead_root, &fs_info->dead_roots, - root_list) { - if (dead_root->root_key.objectid == - found_key.objectid) { - is_dead_root = 1; - break; - } - } - spin_unlock(&fs_info->trans_lock); + + spin_lock(&fs_info->fs_roots_radix_lock); + dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, + (unsigned long)found_key.objectid); + if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) + is_dead_root = 1; + spin_unlock(&fs_info->fs_roots_radix_lock); + if (is_dead_root) { /* prevent this orphan from being found again */ key.offset = found_key.objectid - 1; @@ -3357,43 +3412,40 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_init_map_token(&token, leaf); - btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); - btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); - btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, - &token); - btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); - btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); - - btrfs_set_token_timespec_sec(leaf, &item->atime, - inode->i_atime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->atime, - inode->i_atime.tv_nsec, &token); - - btrfs_set_token_timespec_sec(leaf, &item->mtime, - inode->i_mtime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->mtime, - inode->i_mtime.tv_nsec, &token); - - btrfs_set_token_timespec_sec(leaf, &item->ctime, - inode->i_ctime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->ctime, - inode->i_ctime.tv_nsec, &token); - - btrfs_set_token_timespec_sec(leaf, &item->otime, - BTRFS_I(inode)->i_otime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->otime, - BTRFS_I(inode)->i_otime.tv_nsec, &token); - - btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), - &token); - btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, - &token); - btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode), - &token); - btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); - btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); - btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); - btrfs_set_token_inode_block_group(leaf, item, 0, &token); + btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); + btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); + btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); + btrfs_set_token_inode_mode(&token, item, inode->i_mode); + btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); + + btrfs_set_token_timespec_sec(&token, &item->atime, + inode->i_atime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->atime, + inode->i_atime.tv_nsec); + + btrfs_set_token_timespec_sec(&token, &item->mtime, + inode->i_mtime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->mtime, + inode->i_mtime.tv_nsec); + + btrfs_set_token_timespec_sec(&token, &item->ctime, + inode->i_ctime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->ctime, + inode->i_ctime.tv_nsec); + + btrfs_set_token_timespec_sec(&token, &item->otime, + BTRFS_I(inode)->i_otime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->otime, + BTRFS_I(inode)->i_otime.tv_nsec); + + btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); + btrfs_set_token_inode_generation(&token, item, + BTRFS_I(inode)->generation); + btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); + 
btrfs_set_token_inode_transid(&token, item, trans->transid); + btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); + btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); + btrfs_set_token_inode_block_group(&token, item, 0); } /* @@ -3618,7 +3670,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) * 1 for the inode ref * 1 for the inode */ - return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); + return btrfs_start_transaction_fallback_global_rsv(root, 5); } static int btrfs_unlink(struct inode *dir, struct dentry *dentry) @@ -4108,11 +4160,12 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); /* - * for non-free space inodes and ref cows, we want to back off from - * time to time + * For non-free space inodes and non-shareable roots, we want to back + * off from time to time. This means all inodes in subvolume roots, + * reloc roots, and data reloc roots. */ if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && - test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) be_nice = true; path = btrfs_alloc_path(); @@ -4120,20 +4173,19 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->reada = READA_BACK; - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, &cached_state); - /* - * We want to drop from the next block forward in case this new size is - * not block aligned since we will be keeping the last block of the - * extent just the way it is. - */ - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || - root == fs_info->tree_root) + /* + * We want to drop from the next block forward in case this + * new size is not block aligned since we will be keeping the + * last block of the extent just the way it is. 
+ */ btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, fs_info->sectorsize), (u64)-1, 0); + } /* * This function is also used to drop the items in the log tree before @@ -4241,7 +4293,7 @@ search_again: extent_num_bytes); num_dec = (orig_num_bytes - extent_num_bytes); - if (test_bit(BTRFS_ROOT_REF_COWS, + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && extent_start != 0) inode_sub_bytes(inode, num_dec); @@ -4257,7 +4309,7 @@ search_again: num_dec = btrfs_file_extent_num_bytes(leaf, fi); if (extent_start != 0) { found_extent = 1; - if (test_bit(BTRFS_ROOT_REF_COWS, + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) inode_sub_bytes(inode, num_dec); } @@ -4293,7 +4345,7 @@ search_again: clear_len = fs_info->sectorsize; } - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) inode_sub_bytes(inode, item_end + 1 - new_size); } delete: @@ -4334,8 +4386,7 @@ delete: should_throttle = false; if (found_extent && - (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || - root == fs_info->tree_root)) { + root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { struct btrfs_ref ref = { 0 }; bytes_deleted += extent_num_bytes; @@ -4759,10 +4810,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) truncate_setsize(inode, newsize); - /* Disable nonlocked read DIO to avoid the endless truncate */ - btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); inode_dio_wait(inode); - btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); ret = btrfs_truncate(inode, newsize == oldsize); if (ret && inode->i_nlink) { @@ -4856,8 +4904,8 @@ static void evict_inode_truncate_pages(struct inode *inode) /* * Keep looping until we have no more ranges in the io tree. - * We can have ongoing bios started by readpages (called from readahead) - * that have their endio callback (extent_io.c:end_bio_extent_readpage) + * We can have ongoing bios started by readahead that have + * their endio callback (extent_io.c:end_bio_extent_readpage) * still in progress (unlocked the pages in the bio but did not yet * unlocked the ranges in the io tree). 
Therefore this means some * ranges can still be locked and eviction started because before @@ -5154,7 +5202,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, btrfs_release_path(path); - new_root = btrfs_get_fs_root(fs_info, location, true); + new_root = btrfs_get_fs_root(fs_info, location->objectid, true); if (IS_ERR(new_root)) { err = PTR_ERR(new_root); goto out; @@ -5232,9 +5280,11 @@ static void inode_tree_del(struct inode *inode) static int btrfs_init_locked_inode(struct inode *inode, void *p) { struct btrfs_iget_args *args = p; - inode->i_ino = args->location->objectid; - memcpy(&BTRFS_I(inode)->location, args->location, - sizeof(*args->location)); + + inode->i_ino = args->ino; + BTRFS_I(inode)->location.objectid = args->ino; + BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; + BTRFS_I(inode)->location.offset = 0; BTRFS_I(inode)->root = btrfs_grab_root(args->root); BUG_ON(args->root && !BTRFS_I(inode)->root); return 0; @@ -5243,19 +5293,19 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return args->location->objectid == BTRFS_I(inode)->location.objectid && + + return args->ino == BTRFS_I(inode)->location.objectid && args->root == BTRFS_I(inode)->root; } -static struct inode *btrfs_iget_locked(struct super_block *s, - struct btrfs_key *location, +static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, struct btrfs_root *root) { struct inode *inode; struct btrfs_iget_args args; - unsigned long hashval = btrfs_inode_hash(location->objectid, root); + unsigned long hashval = btrfs_inode_hash(ino, root); - args.location = location; + args.ino = ino; args.root = root; inode = iget5_locked(s, hashval, btrfs_find_actor, @@ -5265,17 +5315,17 @@ static struct inode *btrfs_iget_locked(struct super_block *s, } /* - * Get an inode object given its location and corresponding root. + * Get an inode object given its inode number and corresponding root. * Path can be preallocated to prevent recursing back to iget through * allocator. NULL is also valid but may require an additional allocation * later. 
*/ -struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, +struct inode *btrfs_iget_path(struct super_block *s, u64 ino, struct btrfs_root *root, struct btrfs_path *path) { struct inode *inode; - inode = btrfs_iget_locked(s, location, root); + inode = btrfs_iget_locked(s, ino, root); if (!inode) return ERR_PTR(-ENOMEM); @@ -5302,10 +5352,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, return inode; } -struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root) +struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) { - return btrfs_iget_path(s, location, root, NULL); + return btrfs_iget_path(s, ino, root, NULL); } static struct inode *new_simple_dir(struct super_block *s, @@ -5374,7 +5423,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return ERR_PTR(ret); if (location.type == BTRFS_INODE_ITEM_KEY) { - inode = btrfs_iget(dir->i_sb, &location, root); + inode = btrfs_iget(dir->i_sb, location.objectid, root); if (IS_ERR(inode)) return inode; @@ -5398,7 +5447,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) else inode = new_simple_dir(dir->i_sb, &location, sub_root); } else { - inode = btrfs_iget(dir->i_sb, &location, sub_root); + inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); } if (root != sub_root) btrfs_put_root(sub_root); @@ -5779,7 +5828,8 @@ int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) static int btrfs_insert_inode_locked(struct inode *inode) { struct btrfs_iget_args args; - args.location = &BTRFS_I(inode)->location; + + args.ino = BTRFS_I(inode)->location.objectid; args.root = BTRFS_I(inode)->root; return insert_inode_locked4(inode, @@ -6991,7 +7041,7 @@ out: } static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, - struct extent_state **cached_state, int writing) + struct extent_state **cached_state, bool writing) { struct btrfs_ordered_extent *ordered; int ret = 0; @@ -7050,11 +7100,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, * for it to complete) and then invalidate the pages for * this range (through invalidate_inode_pages2_range()), * but that can lead us to a deadlock with a concurrent - * call to readpages() (a buffered read or a defrag call + * call to readahead (a buffered read or a defrag call * triggered a readahead) on a page lock due to an * ordered dio extent we created before but did not have * yet a corresponding bio submitted (whence it can not - * complete), which makes readpages() wait for that + * complete), which makes readahead wait for that * ordered extent to complete while holding a lock on * that page. 
*/ @@ -7129,30 +7179,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, } -static int btrfs_get_blocks_direct_read(struct extent_map *em, - struct buffer_head *bh_result, - struct inode *inode, - u64 start, u64 len) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - - if (em->block_start == EXTENT_MAP_HOLE || - test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) - return -ENOENT; - - len = min(len, em->len - (start - em->start)); - - bh_result->b_blocknr = (em->block_start + (start - em->start)) >> - inode->i_blkbits; - bh_result->b_size = len; - bh_result->b_bdev = fs_info->fs_devices->latest_bdev; - set_buffer_mapped(bh_result); - - return 0; -} - static int btrfs_get_blocks_direct_write(struct extent_map **map, - struct buffer_head *bh_result, struct inode *inode, struct btrfs_dio_data *dio_data, u64 start, u64 len) @@ -7214,7 +7241,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, } /* this will cow the extent */ - len = bh_result->b_size; free_extent_map(em); *map = em = btrfs_new_extent_direct(inode, start, len); if (IS_ERR(em)) { @@ -7225,64 +7251,73 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, len = min(len, em->len - (start - em->start)); skip_cow: - bh_result->b_blocknr = (em->block_start + (start - em->start)) >> - inode->i_blkbits; - bh_result->b_size = len; - bh_result->b_bdev = fs_info->fs_devices->latest_bdev; - set_buffer_mapped(bh_result); - - if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) - set_buffer_new(bh_result); - /* * Need to update the i_size under the extent lock so buffered * readers will get the updated i_size when we unlock. */ - if (!dio_data->overwrite && start + len > i_size_read(inode)) + if (start + len > i_size_read(inode)) i_size_write(inode, start + len); - WARN_ON(dio_data->reserve < len); dio_data->reserve -= len; - dio_data->unsubmitted_oe_range_end = start + len; - current->journal_info = dio_data; out: return ret; } -static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) +static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, + loff_t length, unsigned flags, struct iomap *iomap, + struct iomap *srcmap) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em; struct extent_state *cached_state = NULL; struct btrfs_dio_data *dio_data = NULL; - u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; - u64 len = bh_result->b_size; + const bool write = !!(flags & IOMAP_WRITE); int ret = 0; + u64 len = length; + bool unlock_extents = false; - if (!create) + if (!write) len = min_t(u64, len, fs_info->sectorsize); lockstart = start; lockend = start + len - 1; - if (current->journal_info) { - /* - * Need to pull our outstanding extents and set journal_info to NULL so - * that anything that needs to check if there's a transaction doesn't get - * confused. - */ - dio_data = current->journal_info; - current->journal_info = NULL; + /* + * The generic stuff only does filemap_write_and_wait_range, which + * isn't enough if we've written compressed pages to this area, so we + * need to flush the dirty pages again to make absolutely sure that any + * outstanding dirty pages are on disk. 
+ */ + if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, + &BTRFS_I(inode)->runtime_flags)) + ret = filemap_fdatawrite_range(inode->i_mapping, start, + start + length - 1); + + dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS); + if (!dio_data) + return -ENOMEM; + + dio_data->length = length; + if (write) { + dio_data->reserve = round_up(length, fs_info->sectorsize); + ret = btrfs_delalloc_reserve_space(inode, + &dio_data->data_reserved, + start, dio_data->reserve); + if (ret) { + extent_changeset_free(dio_data->data_reserved); + kfree(dio_data); + return ret; + } } + iomap->private = dio_data; + /* * If this errors out it's because we couldn't invalidate pagecache for * this range and we need to fallback to buffered. */ - if (lock_extent_direct(inode, lockstart, lockend, &cached_state, - create)) { + if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) { ret = -ENOTBLK; goto err; } @@ -7314,35 +7349,47 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, goto unlock_err; } - if (create) { - ret = btrfs_get_blocks_direct_write(&em, bh_result, inode, - dio_data, start, len); + len = min(len, em->len - (start - em->start)); + if (write) { + ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, + start, len); if (ret < 0) goto unlock_err; - - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, - lockend, &cached_state); + unlock_extents = true; + /* Recalc len in case the new em is smaller than requested */ + len = min(len, em->len - (start - em->start)); } else { - ret = btrfs_get_blocks_direct_read(em, bh_result, inode, - start, len); - /* Can be negative only if we read from a hole */ - if (ret < 0) { - ret = 0; - free_extent_map(em); - goto unlock_err; - } /* * We need to unlock only the end area that we aren't using. * The rest is going to be unlocked by the endio routine. */ - lockstart = start + bh_result->b_size; - if (lockstart < lockend) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - lockstart, lockend, &cached_state); - } else { - free_extent_state(cached_state); - } + lockstart = start + len; + if (lockstart < lockend) + unlock_extents = true; + } + + if (unlock_extents) + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + lockstart, lockend, &cached_state); + else + free_extent_state(cached_state); + + /* + * Translate extent map information to iomap. + * We trim the extents (and move the addr) even though iomap code does + * that, since we have locked only the parts we are performing I/O in. 
+ */ + if ((em->block_start == EXTENT_MAP_HOLE) || + (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { + iomap->addr = IOMAP_NULL_ADDR; + iomap->type = IOMAP_HOLE; + } else { + iomap->addr = em->block_start + (start - em->start); + iomap->type = IOMAP_MAPPED; } + iomap->offset = start; + iomap->bdev = fs_info->fs_devices->latest_bdev; + iomap->length = len; free_extent_map(em); @@ -7352,370 +7399,152 @@ unlock_err: unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); err: - if (dio_data) - current->journal_info = dio_data; + if (dio_data) { + btrfs_delalloc_release_space(inode, dio_data->data_reserved, + start, dio_data->reserve, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve); + extent_changeset_free(dio_data->data_reserved); + kfree(dio_data); + } return ret; } -static inline blk_status_t submit_dio_repair_bio(struct inode *inode, - struct bio *bio, - int mirror_num) +static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, + ssize_t written, unsigned flags, struct iomap *iomap) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - blk_status_t ret; + int ret = 0; + struct btrfs_dio_data *dio_data = iomap->private; + size_t submitted = dio_data->submitted; + const bool write = !!(flags & IOMAP_WRITE); - BUG_ON(bio_op(bio) == REQ_OP_WRITE); + if (!write && (iomap->type == IOMAP_HOLE)) { + /* If reading from a hole, unlock and return */ + unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1); + goto out; + } - ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR); - if (ret) - return ret; + if (submitted < length) { + pos += submitted; + length -= submitted; + if (write) + __endio_write_update_ordered(inode, pos, length, false); + else + unlock_extent(&BTRFS_I(inode)->io_tree, pos, + pos + length - 1); + ret = -ENOTBLK; + } - ret = btrfs_map_bio(fs_info, bio, mirror_num); + if (write) { + if (dio_data->reserve) + btrfs_delalloc_release_space(inode, + dio_data->data_reserved, pos, + dio_data->reserve, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length); + extent_changeset_free(dio_data->data_reserved); + } +out: + kfree(dio_data); + iomap->private = NULL; return ret; } -static int btrfs_check_dio_repairable(struct inode *inode, - struct bio *failed_bio, - struct io_failure_record *failrec, - int failed_mirror) +static void btrfs_dio_private_put(struct btrfs_dio_private *dip) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - int num_copies; - - num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); - if (num_copies == 1) { - /* - * we only have a single copy of the data, so don't bother with - * all the retry and error correction code that follows. no - * matter what the error is, it is very likely to persist. - */ - btrfs_debug(fs_info, - "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d", - num_copies, failrec->this_mirror, failed_mirror); - return 0; - } - - failrec->failed_mirror = failed_mirror; - failrec->this_mirror++; - if (failrec->this_mirror == failed_mirror) - failrec->this_mirror++; + /* + * This implies a barrier so that stores to dio_bio->bi_status before + * this and loads of dio_bio->bi_status after this are fully ordered. 
+ */ + if (!refcount_dec_and_test(&dip->refs)) + return; - if (failrec->this_mirror > num_copies) { - btrfs_debug(fs_info, - "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d", - num_copies, failrec->this_mirror, failed_mirror); - return 0; + if (bio_op(dip->dio_bio) == REQ_OP_WRITE) { + __endio_write_update_ordered(dip->inode, dip->logical_offset, + dip->bytes, + !dip->dio_bio->bi_status); + } else { + unlock_extent(&BTRFS_I(dip->inode)->io_tree, + dip->logical_offset, + dip->logical_offset + dip->bytes - 1); } - return 1; + bio_endio(dip->dio_bio); + kfree(dip); } -static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio, - struct page *page, unsigned int pgoff, - u64 start, u64 end, int failed_mirror, - bio_end_io_t *repair_endio, void *repair_arg) +static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio, + int mirror_num, + unsigned long bio_flags) { - struct io_failure_record *failrec; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; - struct bio *bio; - int isector; - unsigned int read_mode = 0; - int segs; - int ret; - blk_status_t status; - struct bio_vec bvec; + struct btrfs_dio_private *dip = bio->bi_private; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + blk_status_t ret; - BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); + BUG_ON(bio_op(bio) == REQ_OP_WRITE); - ret = btrfs_get_io_failure_record(inode, start, end, &failrec); + ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); if (ret) - return errno_to_blk_status(ret); - - ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, - failed_mirror); - if (!ret) { - free_io_failure(failure_tree, io_tree, failrec); - return BLK_STS_IOERR; - } - - segs = bio_segments(failed_bio); - bio_get_first_bvec(failed_bio, &bvec); - if (segs > 1 || - (bvec.bv_len > btrfs_inode_sectorsize(inode))) - read_mode |= REQ_FAILFAST_DEV; - - isector = start - btrfs_io_bio(failed_bio)->logical; - isector >>= inode->i_sb->s_blocksize_bits; - bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, - pgoff, isector, repair_endio, repair_arg); - bio->bi_opf = REQ_OP_READ | read_mode; - - btrfs_debug(BTRFS_I(inode)->root->fs_info, - "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d", - read_mode, failrec->this_mirror, failrec->in_validation); - - status = submit_dio_repair_bio(inode, bio, failrec->this_mirror); - if (status) { - free_io_failure(failure_tree, io_tree, failrec); - bio_put(bio); - } - - return status; -} - -struct btrfs_retry_complete { - struct completion done; - struct inode *inode; - u64 start; - int uptodate; -}; + return ret; -static void btrfs_retry_endio_nocsum(struct bio *bio) -{ - struct btrfs_retry_complete *done = bio->bi_private; - struct inode *inode = done->inode; - struct bio_vec *bvec; - struct extent_io_tree *io_tree, *failure_tree; - struct bvec_iter_all iter_all; - - if (bio->bi_status) - goto end; - - ASSERT(bio->bi_vcnt == 1); - io_tree = &BTRFS_I(inode)->io_tree; - failure_tree = &BTRFS_I(inode)->io_failure_tree; - ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode)); - - done->uptodate = 1; - ASSERT(!bio_flagged(bio, BIO_CLONED)); - bio_for_each_segment_all(bvec, bio, iter_all) - clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree, - io_tree, done->start, bvec->bv_page, - btrfs_ino(BTRFS_I(inode)), 0); -end: - complete(&done->done); - bio_put(bio); + 
refcount_inc(&dip->refs); + ret = btrfs_map_bio(fs_info, bio, mirror_num); + if (ret) + refcount_dec(&dip->refs); + return ret; } -static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode, - struct btrfs_io_bio *io_bio) +static blk_status_t btrfs_check_read_dio_bio(struct inode *inode, + struct btrfs_io_bio *io_bio, + const bool uptodate) { - struct btrfs_fs_info *fs_info; + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + const u32 sectorsize = fs_info->sectorsize; + struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); struct bio_vec bvec; struct bvec_iter iter; - struct btrfs_retry_complete done; - u64 start; - unsigned int pgoff; - u32 sectorsize; - int nr_sectors; - blk_status_t ret; + u64 start = io_bio->logical; + int icsum = 0; blk_status_t err = BLK_STS_OK; - fs_info = BTRFS_I(inode)->root->fs_info; - sectorsize = fs_info->sectorsize; - - start = io_bio->logical; - done.inode = inode; - io_bio->bio.bi_iter = io_bio->iter; + __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) { + unsigned int i, nr_sectors, pgoff; - bio_for_each_segment(bvec, &io_bio->bio, iter) { nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); pgoff = bvec.bv_offset; - -next_block_or_try_again: - done.uptodate = 0; - done.start = start; - init_completion(&done.done); - - ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page, - pgoff, start, start + sectorsize - 1, - io_bio->mirror_num, - btrfs_retry_endio_nocsum, &done); - if (ret) { - err = ret; - goto next; - } - - wait_for_completion_io(&done.done); - - if (!done.uptodate) { - /* We might have another mirror, so try again */ - goto next_block_or_try_again; - } - -next: - start += sectorsize; - - nr_sectors--; - if (nr_sectors) { - pgoff += sectorsize; + for (i = 0; i < nr_sectors; i++) { ASSERT(pgoff < PAGE_SIZE); - goto next_block_or_try_again; - } - } - - return err; -} - -static void btrfs_retry_endio(struct bio *bio) -{ - struct btrfs_retry_complete *done = bio->bi_private; - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); - struct extent_io_tree *io_tree, *failure_tree; - struct inode *inode = done->inode; - struct bio_vec *bvec; - int uptodate; - int ret; - int i = 0; - struct bvec_iter_all iter_all; - - if (bio->bi_status) - goto end; - - uptodate = 1; - - ASSERT(bio->bi_vcnt == 1); - ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode)); - - io_tree = &BTRFS_I(inode)->io_tree; - failure_tree = &BTRFS_I(inode)->io_failure_tree; - - ASSERT(!bio_flagged(bio, BIO_CLONED)); - bio_for_each_segment_all(bvec, bio, iter_all) { - ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, - bvec->bv_offset, done->start, - bvec->bv_len); - if (!ret) - clean_io_failure(BTRFS_I(inode)->root->fs_info, - failure_tree, io_tree, done->start, - bvec->bv_page, - btrfs_ino(BTRFS_I(inode)), - bvec->bv_offset); - else - uptodate = 0; - i++; - } - - done->uptodate = uptodate; -end: - complete(&done->done); - bio_put(bio); -} - -static blk_status_t __btrfs_subio_endio_read(struct inode *inode, - struct btrfs_io_bio *io_bio, blk_status_t err) -{ - struct btrfs_fs_info *fs_info; - struct bio_vec bvec; - struct bvec_iter iter; - struct btrfs_retry_complete done; - u64 start; - u64 offset = 0; - u32 sectorsize; - int nr_sectors; - unsigned int pgoff; - int csum_pos; - bool uptodate = (err == 0); - int ret; - blk_status_t status; - - fs_info = 
BTRFS_I(inode)->root->fs_info; - sectorsize = fs_info->sectorsize; - - err = BLK_STS_OK; - start = io_bio->logical; - done.inode = inode; - io_bio->bio.bi_iter = io_bio->iter; - - bio_for_each_segment(bvec, &io_bio->bio, iter) { - nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); - - pgoff = bvec.bv_offset; -next_block: - if (uptodate) { - csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset); - ret = __readpage_endio_check(inode, io_bio, csum_pos, - bvec.bv_page, pgoff, start, sectorsize); - if (likely(!ret)) - goto next; - } -try_again: - done.uptodate = 0; - done.start = start; - init_completion(&done.done); - - status = dio_read_error(inode, &io_bio->bio, bvec.bv_page, - pgoff, start, start + sectorsize - 1, - io_bio->mirror_num, btrfs_retry_endio, - &done); - if (status) { - err = status; - goto next; - } - - wait_for_completion_io(&done.done); - - if (!done.uptodate) { - /* We might have another mirror, so try again */ - goto try_again; - } -next: - offset += sectorsize; - start += sectorsize; - - ASSERT(nr_sectors); - - nr_sectors--; - if (nr_sectors) { + if (uptodate && + (!csum || !check_data_csum(inode, io_bio, icsum, + bvec.bv_page, pgoff, + start, sectorsize))) { + clean_io_failure(fs_info, failure_tree, io_tree, + start, bvec.bv_page, + btrfs_ino(BTRFS_I(inode)), + pgoff); + } else { + blk_status_t status; + + status = btrfs_submit_read_repair(inode, + &io_bio->bio, + start - io_bio->logical, + bvec.bv_page, pgoff, + start, + start + sectorsize - 1, + io_bio->mirror_num, + submit_dio_repair_bio); + if (status) + err = status; + } + start += sectorsize; + icsum++; pgoff += sectorsize; - ASSERT(pgoff < PAGE_SIZE); - goto next_block; } } - return err; } -static blk_status_t btrfs_subio_endio_read(struct inode *inode, - struct btrfs_io_bio *io_bio, blk_status_t err) -{ - bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - - if (skip_csum) { - if (unlikely(err)) - return __btrfs_correct_data_nocsum(inode, io_bio); - else - return BLK_STS_OK; - } else { - return __btrfs_subio_endio_read(inode, io_bio, err); - } -} - -static void btrfs_endio_direct_read(struct bio *bio) -{ - struct btrfs_dio_private *dip = bio->bi_private; - struct inode *inode = dip->inode; - struct bio *dio_bio; - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); - blk_status_t err = bio->bi_status; - - if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) - err = btrfs_subio_endio_read(inode, io_bio, err); - - unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, - dip->logical_offset + dip->bytes - 1); - dio_bio = dip->dio_bio; - - kfree(dip); - - dio_bio->bi_status = err; - dio_end_io(dio_bio); - btrfs_io_bio_free_csum(io_bio); - bio_put(bio); -} - static void __endio_write_update_ordered(struct inode *inode, const u64 offset, const u64 bytes, const bool uptodate) @@ -7759,21 +7588,6 @@ static void __endio_write_update_ordered(struct inode *inode, } } -static void btrfs_endio_direct_write(struct bio *bio) -{ - struct btrfs_dio_private *dip = bio->bi_private; - struct bio *dio_bio = dip->dio_bio; - - __endio_write_update_ordered(dip->inode, dip->logical_offset, - dip->bytes, !bio->bi_status); - - kfree(dip); - - dio_bio->bi_status = bio->bi_status; - dio_end_io(dio_bio); - bio_put(bio); -} - static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, struct bio *bio, u64 offset) { @@ -7797,64 +7611,16 @@ static void btrfs_end_dio_bio(struct bio *bio) (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size, err); - if (dip->subio_endio) - err = dip->subio_endio(dip->inode, 
btrfs_io_bio(bio), err); - - if (err) { - /* - * We want to perceive the errors flag being set before - * decrementing the reference count. We don't need a barrier - * since atomic operations with a return value are fully - * ordered as per atomic_t.txt - */ - dip->errors = 1; + if (bio_op(bio) == REQ_OP_READ) { + err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio), + !err); } - /* if there are more bios still pending for this dio, just exit */ - if (!atomic_dec_and_test(&dip->pending_bios)) - goto out; + if (err) + dip->dio_bio->bi_status = err; - if (dip->errors) { - bio_io_error(dip->orig_bio); - } else { - dip->dio_bio->bi_status = BLK_STS_OK; - bio_endio(dip->orig_bio); - } -out: bio_put(bio); -} - -static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, - struct btrfs_dio_private *dip, - struct bio *bio, - u64 file_offset) -{ - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); - struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); - u16 csum_size; - blk_status_t ret; - - /* - * We load all the csum data we need when we submit - * the first bio to reduce the csum tree search and - * contention. - */ - if (dip->logical_offset == file_offset) { - ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, file_offset, - NULL); - if (ret) - return ret; - } - - if (bio == dip->orig_bio) - return 0; - - file_offset -= dip->logical_offset; - file_offset >>= inode->i_sb->s_blocksize_bits; - csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy); - io_bio->csum = orig_io_bio->csum + csum_size * file_offset; - - return 0; + btrfs_dio_private_put(dip); } static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, @@ -7892,10 +7658,12 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, if (ret) goto err; } else { - ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio, - file_offset); - if (ret) - goto err; + u64 csum_offset; + + csum_offset = file_offset - dip->logical_offset; + csum_offset >>= inode->i_sb->s_blocksize_bits; + csum_offset *= btrfs_super_csum_size(fs_info->super_copy); + btrfs_io_bio(bio)->csum = dip->csums + csum_offset; } map: ret = btrfs_map_bio(fs_info, bio, 0); @@ -7903,14 +7671,53 @@ err: return ret; } -static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) +/* + * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked + * or ordered extents whether or not we submit any bios. 
+ */ +static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, + struct inode *inode, + loff_t file_offset) { - struct inode *inode = dip->inode; + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); + const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); + size_t dip_size; + struct btrfs_dio_private *dip; + + dip_size = sizeof(*dip); + if (!write && csum) { + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); + size_t nblocks; + + nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; + dip_size += csum_size * nblocks; + } + + dip = kzalloc(dip_size, GFP_NOFS); + if (!dip) + return NULL; + + dip->inode = inode; + dip->logical_offset = file_offset; + dip->bytes = dio_bio->bi_iter.bi_size; + dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; + dip->dio_bio = dio_bio; + refcount_set(&dip->refs, 1); + return dip; +} + +static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap, + struct bio *dio_bio, loff_t file_offset) +{ + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); + const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + const bool raid56 = (btrfs_data_alloc_profile(fs_info) & + BTRFS_BLOCK_GROUP_RAID56_MASK); + struct btrfs_dio_private *dip; struct bio *bio; - struct bio *orig_bio = dip->orig_bio; - u64 start_sector = orig_bio->bi_iter.bi_sector; - u64 file_offset = dip->logical_offset; + u64 start_sector; int async_submit = 0; u64 submit_len; int clone_offset = 0; @@ -7918,339 +7725,115 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) int ret; blk_status_t status; struct btrfs_io_geometry geom; + struct btrfs_dio_data *dio_data = iomap->private; - submit_len = orig_bio->bi_iter.bi_size; - ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), - start_sector << 9, submit_len, &geom); - if (ret) - return -EIO; + dip = btrfs_create_dio_private(dio_bio, inode, file_offset); + if (!dip) { + if (!write) { + unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, + file_offset + dio_bio->bi_iter.bi_size - 1); + } + dio_bio->bi_status = BLK_STS_RESOURCE; + bio_endio(dio_bio); + return BLK_QC_T_NONE; + } - if (geom.len >= submit_len) { - bio = orig_bio; - dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; - goto submit; + if (!write && csum) { + /* + * Load the csums up front to reduce csum tree searches and + * contention when submitting bios. + */ + status = btrfs_lookup_bio_sums(inode, dio_bio, file_offset, + dip->csums); + if (status != BLK_STS_OK) + goto out_err; } - /* async crcs make it difficult to collect full stripe writes. */ - if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK) - async_submit = 0; - else - async_submit = 1; + start_sector = dio_bio->bi_iter.bi_sector; + submit_len = dio_bio->bi_iter.bi_size; - /* bio split */ - ASSERT(geom.len <= INT_MAX); - atomic_inc(&dip->pending_bios); do { + ret = btrfs_get_io_geometry(fs_info, btrfs_op(dio_bio), + start_sector << 9, submit_len, + &geom); + if (ret) { + status = errno_to_blk_status(ret); + goto out_err; + } + ASSERT(geom.len <= INT_MAX); + clone_len = min_t(int, submit_len, geom.len); /* * This will never fail as it's passing GPF_NOFS and * the allocation is backed by btrfs_bioset. 
*/ - bio = btrfs_bio_clone_partial(orig_bio, clone_offset, - clone_len); + bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len); bio->bi_private = dip; bio->bi_end_io = btrfs_end_dio_bio; btrfs_io_bio(bio)->logical = file_offset; ASSERT(submit_len >= clone_len); submit_len -= clone_len; - if (submit_len == 0) - break; /* * Increase the count before we submit the bio so we know * the end IO handler won't happen before we increase the * count. Otherwise, the dip might get freed before we're * done setting it up. + * + * We transfer the initial reference to the last bio, so we + * don't need to increment the reference count for the last one. */ - atomic_inc(&dip->pending_bios); + if (submit_len > 0) { + refcount_inc(&dip->refs); + /* + * If we are submitting more than one bio, submit them + * all asynchronously. The exception is RAID 5 or 6, as + * asynchronous checksums make it difficult to collect + * full stripe writes. + */ + if (!raid56) + async_submit = 1; + } status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); if (status) { bio_put(bio); - atomic_dec(&dip->pending_bios); + if (submit_len > 0) + refcount_dec(&dip->refs); goto out_err; } + dio_data->submitted += clone_len; clone_offset += clone_len; start_sector += clone_len >> 9; file_offset += clone_len; - - ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), - start_sector << 9, submit_len, &geom); - if (ret) - goto out_err; } while (submit_len > 0); + return BLK_QC_T_NONE; -submit: - status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); - if (!status) - return 0; - - bio_put(bio); out_err: - dip->errors = 1; - /* - * Before atomic variable goto zero, we must make sure dip->errors is - * perceived to be set. This ordering is ensured by the fact that an - * atomic operations with a return value are fully ordered as per - * atomic_t.txt - */ - if (atomic_dec_and_test(&dip->pending_bios)) - bio_io_error(dip->orig_bio); - - /* bio_end_io() will handle error, so we needn't return it */ - return 0; -} - -static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, - loff_t file_offset) -{ - struct btrfs_dio_private *dip = NULL; - struct bio *bio = NULL; - struct btrfs_io_bio *io_bio; - bool write = (bio_op(dio_bio) == REQ_OP_WRITE); - int ret = 0; - - bio = btrfs_bio_clone(dio_bio); - - dip = kzalloc(sizeof(*dip), GFP_NOFS); - if (!dip) { - ret = -ENOMEM; - goto free_ordered; - } - - dip->private = dio_bio->bi_private; - dip->inode = inode; - dip->logical_offset = file_offset; - dip->bytes = dio_bio->bi_iter.bi_size; - dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; - bio->bi_private = dip; - dip->orig_bio = bio; - dip->dio_bio = dio_bio; - atomic_set(&dip->pending_bios, 0); - io_bio = btrfs_io_bio(bio); - io_bio->logical = file_offset; - - if (write) { - bio->bi_end_io = btrfs_endio_direct_write; - } else { - bio->bi_end_io = btrfs_endio_direct_read; - dip->subio_endio = btrfs_subio_endio_read; - } - - /* - * Reset the range for unsubmitted ordered extents (to a 0 length range) - * even if we fail to submit a bio, because in such case we do the - * corresponding error handling below and it must not be done a second - * time by btrfs_direct_IO(). 
- */ - if (write) { - struct btrfs_dio_data *dio_data = current->journal_info; - - dio_data->unsubmitted_oe_range_end = dip->logical_offset + - dip->bytes; - dio_data->unsubmitted_oe_range_start = - dio_data->unsubmitted_oe_range_end; - } - - ret = btrfs_submit_direct_hook(dip); - if (!ret) - return; - - btrfs_io_bio_free_csum(io_bio); - -free_ordered: - /* - * If we arrived here it means either we failed to submit the dip - * or we either failed to clone the dio_bio or failed to allocate the - * dip. If we cloned the dio_bio and allocated the dip, we can just - * call bio_endio against our io_bio so that we get proper resource - * cleanup if we fail to submit the dip, otherwise, we must do the - * same as btrfs_endio_direct_[write|read] because we can't call these - * callbacks - they require an allocated dip and a clone of dio_bio. - */ - if (bio && dip) { - bio_io_error(bio); - /* - * The end io callbacks free our dip, do the final put on bio - * and all the cleanup and final put for dio_bio (through - * dio_end_io()). - */ - dip = NULL; - bio = NULL; - } else { - if (write) - __endio_write_update_ordered(inode, - file_offset, - dio_bio->bi_iter.bi_size, - false); - else - unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, - file_offset + dio_bio->bi_iter.bi_size - 1); - - dio_bio->bi_status = BLK_STS_IOERR; - /* - * Releases and cleans up our dio_bio, no need to bio_put() - * nor bio_endio()/bio_io_error() against dio_bio. - */ - dio_end_io(dio_bio); - } - if (bio) - bio_put(bio); - kfree(dip); -} - -static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, - const struct iov_iter *iter, loff_t offset) -{ - int seg; - int i; - unsigned int blocksize_mask = fs_info->sectorsize - 1; - ssize_t retval = -EINVAL; - - if (offset & blocksize_mask) - goto out; - - if (iov_iter_alignment(iter) & blocksize_mask) - goto out; - - /* If this is a write we don't need to check anymore */ - if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter)) - return 0; - /* - * Check to make sure we don't have duplicate iov_base's in this - * iovec, if so return EINVAL, otherwise we'll get csum errors - * when reading back. - */ - for (seg = 0; seg < iter->nr_segs; seg++) { - for (i = seg + 1; i < iter->nr_segs; i++) { - if (iter->iov[seg].iov_base == iter->iov[i].iov_base) - goto out; - } - } - retval = 0; -out: - return retval; + dip->dio_bio->bi_status = status; + btrfs_dio_private_put(dip); + return BLK_QC_T_NONE; } -static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_dio_data dio_data = { 0 }; - struct extent_changeset *data_reserved = NULL; - loff_t offset = iocb->ki_pos; - size_t count = 0; - int flags = 0; - bool wakeup = true; - bool relock = false; - ssize_t ret; - - if (check_direct_IO(fs_info, iter, offset)) - return 0; - - inode_dio_begin(inode); - - /* - * The generic stuff only does filemap_write_and_wait_range, which - * isn't enough if we've written compressed pages to this area, so - * we need to flush the dirty pages again to make absolutely sure - * that any outstanding dirty pages are on disk. 
- */ - count = iov_iter_count(iter); - if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, - &BTRFS_I(inode)->runtime_flags)) - filemap_fdatawrite_range(inode->i_mapping, offset, - offset + count - 1); - - if (iov_iter_rw(iter) == WRITE) { - /* - * If the write DIO is beyond the EOF, we need update - * the isize, but it is protected by i_mutex. So we can - * not unlock the i_mutex at this case. - */ - if (offset + count <= inode->i_size) { - dio_data.overwrite = 1; - inode_unlock(inode); - relock = true; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; - } - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, - offset, count); - if (ret) - goto out; - - /* - * We need to know how many extents we reserved so that we can - * do the accounting properly if we go over the number we - * originally calculated. Abuse current->journal_info for this. - */ - dio_data.reserve = round_up(count, - fs_info->sectorsize); - dio_data.unsubmitted_oe_range_start = (u64)offset; - dio_data.unsubmitted_oe_range_end = (u64)offset; - current->journal_info = &dio_data; - down_read(&BTRFS_I(inode)->dio_sem); - } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, - &BTRFS_I(inode)->runtime_flags)) { - inode_dio_end(inode); - flags = DIO_LOCKING | DIO_SKIP_HOLES; - wakeup = false; - } - - ret = __blockdev_direct_IO(iocb, inode, - fs_info->fs_devices->latest_bdev, - iter, btrfs_get_blocks_direct, NULL, - btrfs_submit_direct, flags); - if (iov_iter_rw(iter) == WRITE) { - up_read(&BTRFS_I(inode)->dio_sem); - current->journal_info = NULL; - if (ret < 0 && ret != -EIOCBQUEUED) { - if (dio_data.reserve) - btrfs_delalloc_release_space(inode, data_reserved, - offset, dio_data.reserve, true); - /* - * On error we might have left some ordered extents - * without submitting corresponding bios for them, so - * cleanup them up to avoid other tasks getting them - * and waiting for them to complete forever. 
- */ - if (dio_data.unsubmitted_oe_range_start < - dio_data.unsubmitted_oe_range_end) - __endio_write_update_ordered(inode, - dio_data.unsubmitted_oe_range_start, - dio_data.unsubmitted_oe_range_end - - dio_data.unsubmitted_oe_range_start, - false); - } else if (ret >= 0 && (size_t)ret < count) - btrfs_delalloc_release_space(inode, data_reserved, - offset, count - (size_t)ret, true); - btrfs_delalloc_release_extents(BTRFS_I(inode), count); - } -out: - if (wakeup) - inode_dio_end(inode); - if (relock) - inode_lock(inode); - - extent_changeset_free(data_reserved); - return ret; -} +const struct iomap_ops btrfs_dio_iomap_ops = { + .iomap_begin = btrfs_dio_iomap_begin, + .iomap_end = btrfs_dio_iomap_end, +}; -#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) +const struct iomap_dio_ops btrfs_dops = { + .submit_io = btrfs_submit_direct, +}; static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { int ret; - ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); + ret = fiemap_prep(inode, fieinfo, start, &len, 0); if (ret) return ret; @@ -8293,21 +7876,16 @@ static int btrfs_writepages(struct address_space *mapping, return extent_writepages(mapping, wbc); } -static int -btrfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void btrfs_readahead(struct readahead_control *rac) { - return extent_readpages(mapping, pages, nr_pages); + extent_readahead(rac); } static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) { int ret = try_release_extent_mapping(page, gfp_flags); - if (ret == 1) { - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); - } + if (ret == 1) + detach_page_private(page); return ret; } @@ -8329,14 +7907,8 @@ static int btrfs_migratepage(struct address_space *mapping, if (ret != MIGRATEPAGE_SUCCESS) return ret; - if (page_has_private(page)) { - ClearPagePrivate(page); - get_page(newpage); - set_page_private(newpage, page_private(page)); - set_page_private(page, 0); - put_page(page); - SetPagePrivate(newpage); - } + if (page_has_private(page)) + attach_page_private(newpage, detach_page_private(page)); if (PagePrivate2(page)) { ClearPagePrivate2(page); @@ -8458,11 +8030,7 @@ again: } ClearPageChecked(page); - if (PagePrivate(page)) { - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); - } + detach_page_private(page); } /* @@ -10553,8 +10121,8 @@ static const struct address_space_operations btrfs_aops = { .readpage = btrfs_readpage, .writepage = btrfs_writepage, .writepages = btrfs_writepages, - .readpages = btrfs_readpages, - .direct_IO = btrfs_direct_IO, + .readahead = btrfs_readahead, + .direct_IO = noop_direct_IO, .invalidatepage = btrfs_invalidatepage, .releasepage = btrfs_releasepage, #ifdef CONFIG_MIGRATION diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 40b729dce91c..168deb8ef68a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -660,7 +660,7 @@ static noinline int create_subvol(struct inode *dir, goto fail; key.offset = (u64)-1; - new_root = btrfs_get_fs_root(fs_info, &key, true); + new_root = btrfs_get_fs_root(fs_info, objectid, true); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); btrfs_abort_transaction(trans, ret); @@ -748,9 +748,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, struct btrfs_pending_snapshot *pending_snapshot; struct btrfs_trans_handle *trans; int ret; - bool snapshot_force_cow = false; - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + if 
(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return -EINVAL; if (atomic_read(&root->nr_swapfiles)) { @@ -771,27 +770,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, goto free_pending; } - /* - * Force new buffered writes to reserve space even when NOCOW is - * possible. This is to avoid later writeback (running dealloc) to - * fallback to COW mode and unexpectedly fail with ENOSPC. - */ - btrfs_drew_read_lock(&root->snapshot_lock); - - ret = btrfs_start_delalloc_snapshot(root); - if (ret) - goto dec_and_free; - - /* - * All previous writes have started writeback in NOCOW mode, so now - * we force future writes to fallback to COW mode during snapshot - * creation. - */ - atomic_inc(&root->snapshot_force_cow); - snapshot_force_cow = true; - - btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); - btrfs_init_block_rsv(&pending_snapshot->block_rsv, BTRFS_BLOCK_RSV_TEMP); /* @@ -806,7 +784,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, &pending_snapshot->block_rsv, 8, false); if (ret) - goto dec_and_free; + goto free_pending; pending_snapshot->dentry = dentry; pending_snapshot->root = root; @@ -848,11 +826,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, fail: btrfs_put_root(pending_snapshot->snap); btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); -dec_and_free: - if (snapshot_force_cow) - atomic_dec(&root->snapshot_force_cow); - btrfs_drew_read_unlock(&root->snapshot_lock); - free_pending: kfree(pending_snapshot->root_item); btrfs_free_path(pending_snapshot->path); @@ -983,6 +956,45 @@ out_unlock: return error; } +static noinline int btrfs_mksnapshot(const struct path *parent, + const char *name, int namelen, + struct btrfs_root *root, + bool readonly, + struct btrfs_qgroup_inherit *inherit) +{ + int ret; + bool snapshot_force_cow = false; + + /* + * Force new buffered writes to reserve space even when NOCOW is + * possible. This is to avoid later writeback (running dealloc) to + * fallback to COW mode and unexpectedly fail with ENOSPC. + */ + btrfs_drew_read_lock(&root->snapshot_lock); + + ret = btrfs_start_delalloc_snapshot(root); + if (ret) + goto out; + + /* + * All previous writes have started writeback in NOCOW mode, so now + * we force future writes to fallback to COW mode during snapshot + * creation. + */ + atomic_inc(&root->snapshot_force_cow); + snapshot_force_cow = true; + + btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); + + ret = btrfs_mksubvol(parent, name, namelen, + root, readonly, inherit); +out: + if (snapshot_force_cow) + atomic_dec(&root->snapshot_force_cow); + btrfs_drew_read_unlock(&root->snapshot_lock); + return ret; +} + /* * When we're defragging a range, we don't want to kick it off again * if it is really just waiting for delalloc to send it down. 
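The create_snapshot() to btrfs_mksnapshot() split above preserves the original ordering: flush delalloc that may have gone out in NOCOW mode, raise snapshot_force_cow so later writes reserve space and COW, wait for ordered extents, create the snapshot, then drop the gate. The following is a rough userspace model of that gate only; the names are hypothetical and the btrfs_drew_lock serialization is deliberately omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int snapshot_force_cow;

/* Writer side: corresponds to the check made when deciding NOCOW vs COW. */
static bool write_may_nocow(void)
{
	return atomic_load(&snapshot_force_cow) == 0;
}

/* Snapshot side: flush, raise the gate, wait, snapshot, drop the gate. */
static void take_snapshot(void)
{
	/* 1) flush delalloc already started in NOCOW mode */
	/* 2) force later writes to COW */
	atomic_fetch_add(&snapshot_force_cow, 1);
	/* 3) wait for ordered extents, then create the snapshot */
	/* 4) allow NOCOW again */
	atomic_fetch_sub(&snapshot_force_cow, 1);
}

int main(void)
{
	printf("nocow allowed before: %d\n", write_may_nocow());
	take_snapshot();
	printf("nocow allowed after: %d\n", write_may_nocow());
	return 0;
}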
@@ -1762,7 +1774,7 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file, */ ret = -EPERM; } else { - ret = btrfs_mksubvol(&file->f_path, name, namelen, + ret = btrfs_mksnapshot(&file->f_path, name, namelen, BTRFS_I(src_inode)->root, readonly, inherit); } @@ -2127,10 +2139,7 @@ static noinline int search_ioctl(struct inode *inode, /* search the root of the inode that was passed */ root = btrfs_grab_root(BTRFS_I(inode)->root); } else { - key.objectid = sk->tree_id; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_get_fs_root(info, &key, true); + root = btrfs_get_fs_root(info, sk->tree_id, true); if (IS_ERR(root)) { btrfs_free_path(path); return PTR_ERR(root); @@ -2263,10 +2272,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1]; - key.objectid = tree_id; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_get_fs_root(info, &key, true); + root = btrfs_get_fs_root(info, tree_id, true); if (IS_ERR(root)) { ret = PTR_ERR(root); root = NULL; @@ -2359,10 +2365,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, if (dirid != upper_limit.objectid) { ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1]; - key.objectid = treeid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_get_fs_root(fs_info, &key, true); + root = btrfs_get_fs_root(fs_info, treeid, true); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out; @@ -2421,7 +2424,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, goto out_put; } - temp_inode = btrfs_iget(sb, &key2, root); + temp_inode = btrfs_iget(sb, key2.objectid, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); goto out_put; @@ -2608,9 +2611,7 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) /* Get root_item of inode's subvolume */ key.objectid = BTRFS_I(inode)->root->root_key.objectid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - root = btrfs_get_fs_root(fs_info, &key, true); + root = btrfs_get_fs_root(fs_info, key.objectid, true); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_free; @@ -3278,7 +3279,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) struct btrfs_dir_item *di; struct btrfs_trans_handle *trans; struct btrfs_path *path = NULL; - struct btrfs_key location; struct btrfs_disk_key disk_key; u64 objectid = 0; u64 dir_id; @@ -3299,11 +3299,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) if (!objectid) objectid = BTRFS_FS_TREE_OBJECTID; - location.objectid = objectid; - location.type = BTRFS_ROOT_ITEM_KEY; - location.offset = (u64)-1; - - new_root = btrfs_get_fs_root(fs_info, &location, true); + new_root = btrfs_get_fs_root(fs_info, objectid, true); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); goto out; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index fb647d8cf527..f75612e18a82 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -410,6 +410,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) * The rwlock is held for write upon exit. 
*/ void btrfs_tree_lock(struct extent_buffer *eb) + __acquires(&eb->lock) { u64 start_ns = 0; diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h index 72bab64ecf60..6461ebc3a1c1 100644 --- a/fs/btrfs/misc.h +++ b/fs/btrfs/misc.h @@ -6,6 +6,7 @@ #include <linux/sched.h> #include <linux/wait.h> #include <asm/div64.h> +#include <linux/rbtree.h> #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len)) @@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n) return is_power_of_two_u64(n); } +/* + * Simple bytenr-based rb_tree related structures + * + * Any structure that wants to use bytenr as its single search index should + * start with these members. + */ +struct rb_simple_node { + struct rb_node rb_node; + u64 bytenr; +}; + +static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr) +{ + struct rb_node *node = root->rb_node; + struct rb_simple_node *entry; + + while (node) { + entry = rb_entry(node, struct rb_simple_node, rb_node); + + if (bytenr < entry->bytenr) + node = node->rb_left; + else if (bytenr > entry->bytenr) + node = node->rb_right; + else + return node; + } + return NULL; + } + +static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr, + struct rb_node *node) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct rb_simple_node *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct rb_simple_node, rb_node); + + if (bytenr < entry->bytenr) + p = &(*p)->rb_left; + else if (bytenr > entry->bytenr) + p = &(*p)->rb_right; + else + return parent; + } + + rb_link_node(node, parent, p); + rb_insert_color(node, root); + return NULL; +} + #endif diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index ff1ff90e48b1..2dcb1cb21634 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -408,19 +408,14 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, struct btrfs_root *parent_root) { struct super_block *sb = root->fs_info->sb; - struct btrfs_key key; struct inode *parent_inode, *child_inode; int ret; - key.objectid = BTRFS_FIRST_FREE_OBJECTID; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - - parent_inode = btrfs_iget(sb, &key, parent_root); + parent_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, parent_root); if (IS_ERR(parent_inode)) return PTR_ERR(parent_inode); - child_inode = btrfs_iget(sb, &key, root); + child_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, root); if (IS_ERR(child_inode)) { iput(parent_inode); return PTR_ERR(child_inode); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index c3888fb367e7..5bd4089ad0e1 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2622,6 +2622,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, struct btrfs_root *quota_root; struct btrfs_qgroup *srcgroup; struct btrfs_qgroup *dstgroup; + bool need_rescan = false; u32 level_size = 0; u64 nums; @@ -2765,6 +2766,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, goto unlock; } ++i_qgroups; + + /* + * If we're doing a snapshot, and adding the snapshot to a new + * qgroup, the numbers are guaranteed to be incorrect. 
+ */ + if (srcid) + need_rescan = true; } for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { @@ -2784,6 +2792,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dst->rfer = src->rfer - level_size; dst->rfer_cmpr = src->rfer_cmpr - level_size; + + /* Manually tweaking numbers certainly needs a rescan */ + need_rescan = true; } for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { struct btrfs_qgroup *src; @@ -2802,6 +2813,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dst->excl = src->excl + level_size; dst->excl_cmpr = src->excl_cmpr + level_size; + need_rescan = true; } unlock: @@ -2809,6 +2821,8 @@ unlock: out: if (!committing) mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (need_rescan) + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; return ret; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 03bc7134e8cb..3bbae80c752f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -24,6 +24,7 @@ #include "delalloc-space.h" #include "block-group.h" #include "backref.h" +#include "misc.h" /* * Relocation overview @@ -72,100 +73,15 @@ * The entry point of relocation is relocate_block_group() function. */ -/* - * backref_node, mapping_node and tree_block start with this - */ -struct tree_entry { - struct rb_node rb_node; - u64 bytenr; -}; - -/* - * present a tree block in the backref cache - */ -struct backref_node { - struct rb_node rb_node; - u64 bytenr; - - u64 new_bytenr; - /* objectid of tree block owner, can be not uptodate */ - u64 owner; - /* link to pending, changed or detached list */ - struct list_head list; - /* list of upper level blocks reference this block */ - struct list_head upper; - /* list of child blocks in the cache */ - struct list_head lower; - /* NULL if this node is not tree root */ - struct btrfs_root *root; - /* extent buffer got by COW the block */ - struct extent_buffer *eb; - /* level of tree block */ - unsigned int level:8; - /* is the block in non-reference counted tree */ - unsigned int cowonly:1; - /* 1 if no child node in the cache */ - unsigned int lowest:1; - /* is the extent buffer locked */ - unsigned int locked:1; - /* has the block been processed */ - unsigned int processed:1; - /* have backrefs of this block been checked */ - unsigned int checked:1; - /* - * 1 if corresponding block has been cowed but some upper - * level block pointers may not point to the new location - */ - unsigned int pending:1; - /* - * 1 if the backref node isn't connected to any other - * backref node. - */ - unsigned int detached:1; -}; - -/* - * present a block pointer in the backref cache - */ -struct backref_edge { - struct list_head list[2]; - struct backref_node *node[2]; -}; - -#define LOWER 0 -#define UPPER 1 #define RELOCATION_RESERVED_NODES 256 - -struct backref_cache { - /* red black tree of all backref nodes in the cache */ - struct rb_root rb_root; - /* for passing backref nodes to btrfs_reloc_cow_block */ - struct backref_node *path[BTRFS_MAX_LEVEL]; - /* - * list of blocks that have been cowed but some block - * pointers in upper level blocks may not reflect the - * new location - */ - struct list_head pending[BTRFS_MAX_LEVEL]; - /* list of backref nodes with no child node */ - struct list_head leaves; - /* list of blocks that have been cowed in current transaction */ - struct list_head changed; - /* list of detached backref node. 
*/ - struct list_head detached; - - u64 last_trans; - - int nr_nodes; - int nr_edges; -}; - /* * map address of tree root to tree */ struct mapping_node { - struct rb_node rb_node; - u64 bytenr; + struct { + struct rb_node rb_node; + u64 bytenr; + }; /* Use rb_simple_node for search/insert */ void *data; }; @@ -178,8 +94,10 @@ struct mapping_tree { * present a tree block to process */ struct tree_block { - struct rb_node rb_node; - u64 bytenr; + struct { + struct rb_node rb_node; + u64 bytenr; + }; /* Use rb_simple_node for search/insert */ struct btrfs_key key; unsigned int level:8; unsigned int key_ready:1; @@ -204,7 +122,7 @@ struct reloc_control { struct btrfs_block_rsv *block_rsv; - struct backref_cache backref_cache; + struct btrfs_backref_cache backref_cache; struct file_extent_cluster cluster; /* tree blocks have been processed */ @@ -235,168 +153,41 @@ struct reloc_control { #define MOVE_DATA_EXTENTS 0 #define UPDATE_DATA_PTRS 1 -static void remove_backref_node(struct backref_cache *cache, - struct backref_node *node); -static void __mark_block_processed(struct reloc_control *rc, - struct backref_node *node); - -static void mapping_tree_init(struct mapping_tree *tree) -{ - tree->rb_root = RB_ROOT; - spin_lock_init(&tree->lock); -} - -static void backref_cache_init(struct backref_cache *cache) -{ - int i; - cache->rb_root = RB_ROOT; - for (i = 0; i < BTRFS_MAX_LEVEL; i++) - INIT_LIST_HEAD(&cache->pending[i]); - INIT_LIST_HEAD(&cache->changed); - INIT_LIST_HEAD(&cache->detached); - INIT_LIST_HEAD(&cache->leaves); -} - -static void backref_cache_cleanup(struct backref_cache *cache) -{ - struct backref_node *node; - int i; - - while (!list_empty(&cache->detached)) { - node = list_entry(cache->detached.next, - struct backref_node, list); - remove_backref_node(cache, node); - } - - while (!list_empty(&cache->leaves)) { - node = list_entry(cache->leaves.next, - struct backref_node, lower); - remove_backref_node(cache, node); - } - - cache->last_trans = 0; - - for (i = 0; i < BTRFS_MAX_LEVEL; i++) - ASSERT(list_empty(&cache->pending[i])); - ASSERT(list_empty(&cache->changed)); - ASSERT(list_empty(&cache->detached)); - ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); - ASSERT(!cache->nr_nodes); - ASSERT(!cache->nr_edges); -} - -static struct backref_node *alloc_backref_node(struct backref_cache *cache) -{ - struct backref_node *node; - - node = kzalloc(sizeof(*node), GFP_NOFS); - if (node) { - INIT_LIST_HEAD(&node->list); - INIT_LIST_HEAD(&node->upper); - INIT_LIST_HEAD(&node->lower); - RB_CLEAR_NODE(&node->rb_node); - cache->nr_nodes++; - } - return node; -} - -static void free_backref_node(struct backref_cache *cache, - struct backref_node *node) -{ - if (node) { - cache->nr_nodes--; - btrfs_put_root(node->root); - kfree(node); - } -} - -static struct backref_edge *alloc_backref_edge(struct backref_cache *cache) -{ - struct backref_edge *edge; - - edge = kzalloc(sizeof(*edge), GFP_NOFS); - if (edge) - cache->nr_edges++; - return edge; -} - -static void free_backref_edge(struct backref_cache *cache, - struct backref_edge *edge) +static void mark_block_processed(struct reloc_control *rc, + struct btrfs_backref_node *node) { - if (edge) { - cache->nr_edges--; - kfree(edge); - } -} + u32 blocksize; -static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, - struct rb_node *node) -{ - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct tree_entry *entry; - - while (*p) { - parent = *p; - entry = rb_entry(parent, struct tree_entry, rb_node); - - if (bytenr < 
entry->bytenr) - p = &(*p)->rb_left; - else if (bytenr > entry->bytenr) - p = &(*p)->rb_right; - else - return parent; + if (node->level == 0 || + in_range(node->bytenr, rc->block_group->start, + rc->block_group->length)) { + blocksize = rc->extent_root->fs_info->nodesize; + set_extent_bits(&rc->processed_blocks, node->bytenr, + node->bytenr + blocksize - 1, EXTENT_DIRTY); } - - rb_link_node(node, parent, p); - rb_insert_color(node, root); - return NULL; + node->processed = 1; } -static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) -{ - struct rb_node *n = root->rb_node; - struct tree_entry *entry; - - while (n) { - entry = rb_entry(n, struct tree_entry, rb_node); - if (bytenr < entry->bytenr) - n = n->rb_left; - else if (bytenr > entry->bytenr) - n = n->rb_right; - else - return n; - } - return NULL; -} - -static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr) +static void mapping_tree_init(struct mapping_tree *tree) { - - struct btrfs_fs_info *fs_info = NULL; - struct backref_node *bnode = rb_entry(rb_node, struct backref_node, - rb_node); - if (bnode->root) - fs_info = bnode->root->fs_info; - btrfs_panic(fs_info, errno, - "Inconsistency in backref cache found at offset %llu", - bytenr); + tree->rb_root = RB_ROOT; + spin_lock_init(&tree->lock); } /* * walk up backref nodes until reach node presents tree root */ -static struct backref_node *walk_up_backref(struct backref_node *node, - struct backref_edge *edges[], - int *index) +static struct btrfs_backref_node *walk_up_backref( + struct btrfs_backref_node *node, + struct btrfs_backref_edge *edges[], int *index) { - struct backref_edge *edge; + struct btrfs_backref_edge *edge; int idx = *index; while (!list_empty(&node->upper)) { edge = list_entry(node->upper.next, - struct backref_edge, list[LOWER]); + struct btrfs_backref_edge, list[LOWER]); edges[idx++] = edge; node = edge->node[UPPER]; } @@ -408,11 +199,11 @@ static struct backref_node *walk_up_backref(struct backref_node *node, /* * walk down backref nodes to find start of next reference path */ -static struct backref_node *walk_down_backref(struct backref_edge *edges[], - int *index) +static struct btrfs_backref_node *walk_down_backref( + struct btrfs_backref_edge *edges[], int *index) { - struct backref_edge *edge; - struct backref_node *lower; + struct btrfs_backref_edge *edge; + struct btrfs_backref_node *lower; int idx = *index; while (idx > 0) { @@ -423,7 +214,7 @@ static struct backref_node *walk_down_backref(struct backref_edge *edges[], continue; } edge = list_entry(edge->list[LOWER].next, - struct backref_edge, list[LOWER]); + struct btrfs_backref_edge, list[LOWER]); edges[idx - 1] = edge; *index = idx; return edge->node[UPPER]; @@ -432,95 +223,24 @@ static struct backref_node *walk_down_backref(struct backref_edge *edges[], return NULL; } -static void unlock_node_buffer(struct backref_node *node) -{ - if (node->locked) { - btrfs_tree_unlock(node->eb); - node->locked = 0; - } -} - -static void drop_node_buffer(struct backref_node *node) -{ - if (node->eb) { - unlock_node_buffer(node); - free_extent_buffer(node->eb); - node->eb = NULL; - } -} - -static void drop_backref_node(struct backref_cache *tree, - struct backref_node *node) -{ - BUG_ON(!list_empty(&node->upper)); - - drop_node_buffer(node); - list_del(&node->list); - list_del(&node->lower); - if (!RB_EMPTY_NODE(&node->rb_node)) - rb_erase(&node->rb_node, &tree->rb_root); - free_backref_node(tree, node); -} - -/* - * remove a backref node from the backref cache - */ -static void 
remove_backref_node(struct backref_cache *cache, - struct backref_node *node) -{ - struct backref_node *upper; - struct backref_edge *edge; - - if (!node) - return; - - BUG_ON(!node->lowest && !node->detached); - while (!list_empty(&node->upper)) { - edge = list_entry(node->upper.next, struct backref_edge, - list[LOWER]); - upper = edge->node[UPPER]; - list_del(&edge->list[LOWER]); - list_del(&edge->list[UPPER]); - free_backref_edge(cache, edge); - - if (RB_EMPTY_NODE(&upper->rb_node)) { - BUG_ON(!list_empty(&node->upper)); - drop_backref_node(cache, node); - node = upper; - node->lowest = 1; - continue; - } - /* - * add the node to leaf node list if no other - * child block cached. - */ - if (list_empty(&upper->lower)) { - list_add_tail(&upper->lower, &cache->leaves); - upper->lowest = 1; - } - } - - drop_backref_node(cache, node); -} - -static void update_backref_node(struct backref_cache *cache, - struct backref_node *node, u64 bytenr) +static void update_backref_node(struct btrfs_backref_cache *cache, + struct btrfs_backref_node *node, u64 bytenr) { struct rb_node *rb_node; rb_erase(&node->rb_node, &cache->rb_root); node->bytenr = bytenr; - rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node); if (rb_node) - backref_tree_panic(rb_node, -EEXIST, bytenr); + btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST); } /* * update backref cache after a transaction commit */ static int update_backref_cache(struct btrfs_trans_handle *trans, - struct backref_cache *cache) + struct btrfs_backref_cache *cache) { - struct backref_node *node; + struct btrfs_backref_node *node; int level = 0; if (cache->last_trans == 0) { @@ -538,13 +258,13 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, */ while (!list_empty(&cache->detached)) { node = list_entry(cache->detached.next, - struct backref_node, list); - remove_backref_node(cache, node); + struct btrfs_backref_node, list); + btrfs_backref_cleanup_node(cache, node); } while (!list_empty(&cache->changed)) { node = list_entry(cache->changed.next, - struct backref_node, list); + struct btrfs_backref_node, list); list_del_init(&node->list); BUG_ON(node->pending); update_backref_node(cache, node, node->new_bytenr); @@ -585,7 +305,8 @@ static bool reloc_root_is_dead(struct btrfs_root *root) * * Reloc tree after swap is considered dead, thus not considered as valid. * This is enough for most callers, as they don't distinguish dead reloc root - * from no reloc root. But should_ignore_root() below is a special case. + * from no reloc root. But btrfs_should_ignore_reloc_root() below is a + * special case. 
*/ static bool have_reloc_root(struct btrfs_root *root) { @@ -596,11 +317,11 @@ static bool have_reloc_root(struct btrfs_root *root) return true; } -static int should_ignore_root(struct btrfs_root *root) +int btrfs_should_ignore_reloc_root(struct btrfs_root *root) { struct btrfs_root *reloc_root; - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return 0; /* This root has been merged with its reloc tree, we can ignore it */ @@ -622,18 +343,20 @@ static int should_ignore_root(struct btrfs_root *root) */ return 1; } + /* * find reloc tree by address of tree root */ -static struct btrfs_root *find_reloc_root(struct reloc_control *rc, - u64 bytenr) +struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr) { + struct reloc_control *rc = fs_info->reloc_ctl; struct rb_node *rb_node; struct mapping_node *node; struct btrfs_root *root = NULL; + ASSERT(rc); spin_lock(&rc->reloc_root_tree.lock); - rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr); + rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); root = (struct btrfs_root *)node->data; @@ -642,594 +365,165 @@ static struct btrfs_root *find_reloc_root(struct reloc_control *rc, return btrfs_grab_root(root); } -static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid) +/* + * For useless nodes, do two major clean ups: + * + * - Cleanup the children edges and nodes + * If child node is also orphan (no parent) during cleanup, then the child + * node will also be cleaned up. + * + * - Freeing up leaves (level 0), keeps nodes detached + * For nodes, the node is still cached as "detached" + * + * Return false if @node is not in the @useless_nodes list. + * Return true if @node is in the @useless_nodes list. 
+ */ +static bool handle_useless_nodes(struct reloc_control *rc, + struct btrfs_backref_node *node) { - struct btrfs_key key; + struct btrfs_backref_cache *cache = &rc->backref_cache; + struct list_head *useless_node = &cache->useless_node; + bool ret = false; - key.objectid = root_objectid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; + while (!list_empty(useless_node)) { + struct btrfs_backref_node *cur; - return btrfs_get_fs_root(fs_info, &key, false); -} + cur = list_first_entry(useless_node, struct btrfs_backref_node, + list); + list_del_init(&cur->list); -static noinline_for_stack -int find_inline_backref(struct extent_buffer *leaf, int slot, - unsigned long *ptr, unsigned long *end) -{ - struct btrfs_key key; - struct btrfs_extent_item *ei; - struct btrfs_tree_block_info *bi; - u32 item_size; + /* Only tree root nodes can be added to @useless_nodes */ + ASSERT(list_empty(&cur->upper)); - btrfs_item_key_to_cpu(leaf, &key, slot); + if (cur == node) + ret = true; - item_size = btrfs_item_size_nr(leaf, slot); - if (item_size < sizeof(*ei)) { - btrfs_print_v0_err(leaf->fs_info); - btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL); - return 1; - } - ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); - WARN_ON(!(btrfs_extent_flags(leaf, ei) & - BTRFS_EXTENT_FLAG_TREE_BLOCK)); + /* The node is the lowest node */ + if (cur->lowest) { + list_del_init(&cur->lower); + cur->lowest = 0; + } - if (key.type == BTRFS_EXTENT_ITEM_KEY && - item_size <= sizeof(*ei) + sizeof(*bi)) { - WARN_ON(item_size < sizeof(*ei) + sizeof(*bi)); - return 1; - } - if (key.type == BTRFS_METADATA_ITEM_KEY && - item_size <= sizeof(*ei)) { - WARN_ON(item_size < sizeof(*ei)); - return 1; - } + /* Cleanup the lower edges */ + while (!list_empty(&cur->lower)) { + struct btrfs_backref_edge *edge; + struct btrfs_backref_node *lower; - if (key.type == BTRFS_EXTENT_ITEM_KEY) { - bi = (struct btrfs_tree_block_info *)(ei + 1); - *ptr = (unsigned long)(bi + 1); - } else { - *ptr = (unsigned long)(ei + 1); + edge = list_entry(cur->lower.next, + struct btrfs_backref_edge, list[UPPER]); + list_del(&edge->list[UPPER]); + list_del(&edge->list[LOWER]); + lower = edge->node[LOWER]; + btrfs_backref_free_edge(cache, edge); + + /* Child node is also orphan, queue for cleanup */ + if (list_empty(&lower->upper)) + list_add(&lower->list, useless_node); + } + /* Mark this block processed for relocation */ + mark_block_processed(rc, cur); + + /* + * Backref nodes for tree leaves are deleted from the cache. + * Backref nodes for upper level tree blocks are left in the + * cache to avoid unnecessary backref lookup. + */ + if (cur->level > 0) { + list_add(&cur->list, &cache->detached); + cur->detached = 1; + } else { + rb_erase(&cur->rb_node, &cache->rb_root); + btrfs_backref_free_node(cache, cur); + } } - *end = (unsigned long)ei + item_size; - return 0; + return ret; } /* - * build backref tree for a given tree block. root of the backref tree - * corresponds the tree block, leaves of the backref tree correspond - * roots of b-trees that reference the tree block. + * Build backref tree for a given tree block. Root of the backref tree + * corresponds the tree block, leaves of the backref tree correspond roots of + * b-trees that reference the tree block. * - * the basic idea of this function is check backrefs of a given block - * to find upper level blocks that reference the block, and then check - * backrefs of these upper level blocks recursively. 
the recursion stop - * when tree root is reached or backrefs for the block is cached. + * The basic idea of this function is check backrefs of a given block to find + * upper level blocks that reference the block, and then check backrefs of + * these upper level blocks recursively. The recursion stops when tree root is + * reached or backrefs for the block is cached. * - * NOTE: if we find backrefs for a block are cached, we know backrefs - * for all upper level blocks that directly/indirectly reference the - * block are also cached. + * NOTE: if we find that backrefs for a block are cached, we know backrefs for + * all upper level blocks that directly/indirectly reference the block are also + * cached. */ -static noinline_for_stack -struct backref_node *build_backref_tree(struct reloc_control *rc, - struct btrfs_key *node_key, - int level, u64 bytenr) +static noinline_for_stack struct btrfs_backref_node *build_backref_tree( + struct reloc_control *rc, struct btrfs_key *node_key, + int level, u64 bytenr) { - struct backref_cache *cache = &rc->backref_cache; - struct btrfs_path *path1; /* For searching extent root */ - struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */ - struct extent_buffer *eb; - struct btrfs_root *root; - struct backref_node *cur; - struct backref_node *upper; - struct backref_node *lower; - struct backref_node *node = NULL; - struct backref_node *exist = NULL; - struct backref_edge *edge; - struct rb_node *rb_node; - struct btrfs_key key; - unsigned long end; - unsigned long ptr; - LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */ - LIST_HEAD(useless); - int cowonly; + struct btrfs_backref_iter *iter; + struct btrfs_backref_cache *cache = &rc->backref_cache; + /* For searching parent of TREE_BLOCK_REF */ + struct btrfs_path *path; + struct btrfs_backref_node *cur; + struct btrfs_backref_node *node = NULL; + struct btrfs_backref_edge *edge; int ret; int err = 0; - bool need_check = true; - path1 = btrfs_alloc_path(); - path2 = btrfs_alloc_path(); - if (!path1 || !path2) { + iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS); + if (!iter) + return ERR_PTR(-ENOMEM); + path = btrfs_alloc_path(); + if (!path) { err = -ENOMEM; goto out; } - node = alloc_backref_node(cache); + node = btrfs_backref_alloc_node(cache, bytenr, level); if (!node) { err = -ENOMEM; goto out; } - node->bytenr = bytenr; - node->level = level; node->lowest = 1; cur = node; -again: - end = 0; - ptr = 0; - key.objectid = cur->bytenr; - key.type = BTRFS_METADATA_ITEM_KEY; - key.offset = (u64)-1; - - path1->search_commit_root = 1; - path1->skip_locking = 1; - ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1, - 0, 0); - if (ret < 0) { - err = ret; - goto out; - } - ASSERT(ret); - ASSERT(path1->slots[0]); - - path1->slots[0]--; - WARN_ON(cur->checked); - if (!list_empty(&cur->upper)) { - /* - * the backref was added previously when processing - * backref of type BTRFS_TREE_BLOCK_REF_KEY - */ - ASSERT(list_is_singular(&cur->upper)); - edge = list_entry(cur->upper.next, struct backref_edge, - list[LOWER]); - ASSERT(list_empty(&edge->list[UPPER])); - exist = edge->node[UPPER]; - /* - * add the upper level block to pending list if we need - * check its backrefs - */ - if (!exist->checked) - list_add_tail(&edge->list[UPPER], &list); - } else { - exist = NULL; - } - - while (1) { - cond_resched(); - eb = path1->nodes[0]; - - if (ptr >= end) { - if (path1->slots[0] >= btrfs_header_nritems(eb)) { - ret = btrfs_next_leaf(rc->extent_root, path1); - 
if (ret < 0) { - err = ret; - goto out; - } - if (ret > 0) - break; - eb = path1->nodes[0]; - } - - btrfs_item_key_to_cpu(eb, &key, path1->slots[0]); - if (key.objectid != cur->bytenr) { - WARN_ON(exist); - break; - } - - if (key.type == BTRFS_EXTENT_ITEM_KEY || - key.type == BTRFS_METADATA_ITEM_KEY) { - ret = find_inline_backref(eb, path1->slots[0], - &ptr, &end); - if (ret) - goto next; - } - } - - if (ptr < end) { - /* update key for inline back ref */ - struct btrfs_extent_inline_ref *iref; - int type; - iref = (struct btrfs_extent_inline_ref *)ptr; - type = btrfs_get_extent_inline_ref_type(eb, iref, - BTRFS_REF_TYPE_BLOCK); - if (type == BTRFS_REF_TYPE_INVALID) { - err = -EUCLEAN; - goto out; - } - key.type = type; - key.offset = btrfs_extent_inline_ref_offset(eb, iref); - - WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY && - key.type != BTRFS_SHARED_BLOCK_REF_KEY); - } - - /* - * Parent node found and matches current inline ref, no need to - * rebuild this node for this inline ref. - */ - if (exist && - ((key.type == BTRFS_TREE_BLOCK_REF_KEY && - exist->owner == key.offset) || - (key.type == BTRFS_SHARED_BLOCK_REF_KEY && - exist->bytenr == key.offset))) { - exist = NULL; - goto next; - } - - /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ - if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { - if (key.objectid == key.offset) { - /* - * Only root blocks of reloc trees use backref - * pointing to itself. - */ - root = find_reloc_root(rc, cur->bytenr); - ASSERT(root); - cur->root = root; - break; - } - - edge = alloc_backref_edge(cache); - if (!edge) { - err = -ENOMEM; - goto out; - } - rb_node = tree_search(&cache->rb_root, key.offset); - if (!rb_node) { - upper = alloc_backref_node(cache); - if (!upper) { - free_backref_edge(cache, edge); - err = -ENOMEM; - goto out; - } - upper->bytenr = key.offset; - upper->level = cur->level + 1; - /* - * backrefs for the upper level block isn't - * cached, add the block to pending list - */ - list_add_tail(&edge->list[UPPER], &list); - } else { - upper = rb_entry(rb_node, struct backref_node, - rb_node); - ASSERT(upper->checked); - INIT_LIST_HEAD(&edge->list[UPPER]); - } - list_add_tail(&edge->list[LOWER], &cur->upper); - edge->node[LOWER] = cur; - edge->node[UPPER] = upper; - - goto next; - } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { - err = -EINVAL; - btrfs_print_v0_err(rc->extent_root->fs_info); - btrfs_handle_fs_error(rc->extent_root->fs_info, err, - NULL); - goto out; - } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { - goto next; - } - - /* - * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset - * means the root objectid. We need to search the tree to get - * its parent bytenr. - */ - root = read_fs_root(rc->extent_root->fs_info, key.offset); - if (IS_ERR(root)) { - err = PTR_ERR(root); - goto out; - } - - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) - cur->cowonly = 1; - - if (btrfs_root_level(&root->root_item) == cur->level) { - /* tree root */ - ASSERT(btrfs_root_bytenr(&root->root_item) == - cur->bytenr); - if (should_ignore_root(root)) { - btrfs_put_root(root); - list_add(&cur->list, &useless); - } else { - cur->root = root; - } - break; - } - - level = cur->level + 1; - - /* Search the tree to find parent blocks referring the block. 
*/ - path2->search_commit_root = 1; - path2->skip_locking = 1; - path2->lowest_level = level; - ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0); - path2->lowest_level = 0; + /* Breadth-first search to build backref cache */ + do { + ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, + cur); if (ret < 0) { - btrfs_put_root(root); err = ret; goto out; } - if (ret > 0 && path2->slots[level] > 0) - path2->slots[level]--; - - eb = path2->nodes[level]; - if (btrfs_node_blockptr(eb, path2->slots[level]) != - cur->bytenr) { - btrfs_err(root->fs_info, - "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", - cur->bytenr, level - 1, - root->root_key.objectid, - node_key->objectid, node_key->type, - node_key->offset); - btrfs_put_root(root); - err = -ENOENT; - goto out; - } - lower = cur; - need_check = true; - - /* Add all nodes and edges in the path */ - for (; level < BTRFS_MAX_LEVEL; level++) { - if (!path2->nodes[level]) { - ASSERT(btrfs_root_bytenr(&root->root_item) == - lower->bytenr); - if (should_ignore_root(root)) { - btrfs_put_root(root); - list_add(&lower->list, &useless); - } else { - lower->root = root; - } - break; - } - - edge = alloc_backref_edge(cache); - if (!edge) { - btrfs_put_root(root); - err = -ENOMEM; - goto out; - } - - eb = path2->nodes[level]; - rb_node = tree_search(&cache->rb_root, eb->start); - if (!rb_node) { - upper = alloc_backref_node(cache); - if (!upper) { - btrfs_put_root(root); - free_backref_edge(cache, edge); - err = -ENOMEM; - goto out; - } - upper->bytenr = eb->start; - upper->owner = btrfs_header_owner(eb); - upper->level = lower->level + 1; - if (!test_bit(BTRFS_ROOT_REF_COWS, - &root->state)) - upper->cowonly = 1; - - /* - * if we know the block isn't shared - * we can void checking its backrefs. - */ - if (btrfs_block_can_be_shared(root, eb)) - upper->checked = 0; - else - upper->checked = 1; - - /* - * add the block to pending list if we - * need check its backrefs, we only do this once - * while walking up a tree as we will catch - * anything else later on. - */ - if (!upper->checked && need_check) { - need_check = false; - list_add_tail(&edge->list[UPPER], - &list); - } else { - if (upper->checked) - need_check = true; - INIT_LIST_HEAD(&edge->list[UPPER]); - } - } else { - upper = rb_entry(rb_node, struct backref_node, - rb_node); - ASSERT(upper->checked); - INIT_LIST_HEAD(&edge->list[UPPER]); - if (!upper->owner) - upper->owner = btrfs_header_owner(eb); - } - list_add_tail(&edge->list[LOWER], &lower->upper); - edge->node[LOWER] = lower; - edge->node[UPPER] = upper; - - if (rb_node) { - btrfs_put_root(root); - break; - } - lower = upper; - upper = NULL; - } - btrfs_release_path(path2); -next: - if (ptr < end) { - ptr += btrfs_extent_inline_ref_size(key.type); - if (ptr >= end) { - WARN_ON(ptr > end); - ptr = 0; - end = 0; - } - } - if (ptr >= end) - path1->slots[0]++; - } - btrfs_release_path(path1); - - cur->checked = 1; - WARN_ON(exist); - - /* the pending list isn't empty, take the first block to process */ - if (!list_empty(&list)) { - edge = list_entry(list.next, struct backref_edge, list[UPPER]); - list_del_init(&edge->list[UPPER]); - cur = edge->node[UPPER]; - goto again; - } - - /* - * everything goes well, connect backref nodes and insert backref nodes - * into the cache. 
- */ - ASSERT(node->checked); - cowonly = node->cowonly; - if (!cowonly) { - rb_node = tree_insert(&cache->rb_root, node->bytenr, - &node->rb_node); - if (rb_node) - backref_tree_panic(rb_node, -EEXIST, node->bytenr); - list_add_tail(&node->lower, &cache->leaves); - } - - list_for_each_entry(edge, &node->upper, list[LOWER]) - list_add_tail(&edge->list[UPPER], &list); - - while (!list_empty(&list)) { - edge = list_entry(list.next, struct backref_edge, list[UPPER]); - list_del_init(&edge->list[UPPER]); - upper = edge->node[UPPER]; - if (upper->detached) { - list_del(&edge->list[LOWER]); - lower = edge->node[LOWER]; - free_backref_edge(cache, edge); - if (list_empty(&lower->upper)) - list_add(&lower->list, &useless); - continue; - } - - if (!RB_EMPTY_NODE(&upper->rb_node)) { - if (upper->lowest) { - list_del_init(&upper->lower); - upper->lowest = 0; - } - - list_add_tail(&edge->list[UPPER], &upper->lower); - continue; - } - - if (!upper->checked) { - /* - * Still want to blow up for developers since this is a - * logic bug. - */ - ASSERT(0); - err = -EINVAL; - goto out; - } - if (cowonly != upper->cowonly) { - ASSERT(0); - err = -EINVAL; - goto out; - } - - if (!cowonly) { - rb_node = tree_insert(&cache->rb_root, upper->bytenr, - &upper->rb_node); - if (rb_node) - backref_tree_panic(rb_node, -EEXIST, - upper->bytenr); + edge = list_first_entry_or_null(&cache->pending_edge, + struct btrfs_backref_edge, list[UPPER]); + /* + * The pending list isn't empty, take the first block to + * process + */ + if (edge) { + list_del_init(&edge->list[UPPER]); + cur = edge->node[UPPER]; } + } while (edge); - list_add_tail(&edge->list[UPPER], &upper->lower); - - list_for_each_entry(edge, &upper->upper, list[LOWER]) - list_add_tail(&edge->list[UPPER], &list); + /* Finish the upper linkage of newly added edges/nodes */ + ret = btrfs_backref_finish_upper_links(cache, node); + if (ret < 0) { + err = ret; + goto out; } - /* - * process useless backref nodes. backref nodes for tree leaves - * are deleted from the cache. backref nodes for upper level - * tree blocks are left in the cache to avoid unnecessary backref - * lookup. 
- */ - while (!list_empty(&useless)) { - upper = list_entry(useless.next, struct backref_node, list); - list_del_init(&upper->list); - ASSERT(list_empty(&upper->upper)); - if (upper == node) - node = NULL; - if (upper->lowest) { - list_del_init(&upper->lower); - upper->lowest = 0; - } - while (!list_empty(&upper->lower)) { - edge = list_entry(upper->lower.next, - struct backref_edge, list[UPPER]); - list_del(&edge->list[UPPER]); - list_del(&edge->list[LOWER]); - lower = edge->node[LOWER]; - free_backref_edge(cache, edge); - if (list_empty(&lower->upper)) - list_add(&lower->list, &useless); - } - __mark_block_processed(rc, upper); - if (upper->level > 0) { - list_add(&upper->list, &cache->detached); - upper->detached = 1; - } else { - rb_erase(&upper->rb_node, &cache->rb_root); - free_backref_node(cache, upper); - } - } + if (handle_useless_nodes(rc, node)) + node = NULL; out: - btrfs_free_path(path1); - btrfs_free_path(path2); + btrfs_backref_iter_free(iter); + btrfs_free_path(path); if (err) { - while (!list_empty(&useless)) { - lower = list_entry(useless.next, - struct backref_node, list); - list_del_init(&lower->list); - } - while (!list_empty(&list)) { - edge = list_first_entry(&list, struct backref_edge, - list[UPPER]); - list_del(&edge->list[UPPER]); - list_del(&edge->list[LOWER]); - lower = edge->node[LOWER]; - upper = edge->node[UPPER]; - free_backref_edge(cache, edge); - - /* - * Lower is no longer linked to any upper backref nodes - * and isn't in the cache, we can free it ourselves. - */ - if (list_empty(&lower->upper) && - RB_EMPTY_NODE(&lower->rb_node)) - list_add(&lower->list, &useless); - - if (!RB_EMPTY_NODE(&upper->rb_node)) - continue; - - /* Add this guy's upper edges to the list to process */ - list_for_each_entry(edge, &upper->upper, list[LOWER]) - list_add_tail(&edge->list[UPPER], &list); - if (list_empty(&upper->upper)) - list_add(&upper->list, &useless); - } - - while (!list_empty(&useless)) { - lower = list_entry(useless.next, - struct backref_node, list); - list_del_init(&lower->list); - if (lower == node) - node = NULL; - free_backref_node(cache, lower); - } - - remove_backref_node(cache, node); + btrfs_backref_error_cleanup(cache, node); return ERR_PTR(err); } ASSERT(!node || !node->detached); + ASSERT(list_empty(&cache->useless_node) && + list_empty(&cache->pending_edge)); return node; } @@ -1244,19 +538,19 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, struct btrfs_root *dest) { struct btrfs_root *reloc_root = src->reloc_root; - struct backref_cache *cache = &rc->backref_cache; - struct backref_node *node = NULL; - struct backref_node *new_node; - struct backref_edge *edge; - struct backref_edge *new_edge; + struct btrfs_backref_cache *cache = &rc->backref_cache; + struct btrfs_backref_node *node = NULL; + struct btrfs_backref_node *new_node; + struct btrfs_backref_edge *edge; + struct btrfs_backref_edge *new_edge; struct rb_node *rb_node; if (cache->last_trans > 0) update_backref_cache(trans, cache); - rb_node = tree_search(&cache->rb_root, src->commit_root->start); + rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start); if (rb_node) { - node = rb_entry(rb_node, struct backref_node, rb_node); + node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); if (node->detached) node = NULL; else @@ -1264,10 +558,10 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, } if (!node) { - rb_node = tree_search(&cache->rb_root, - reloc_root->commit_root->start); + rb_node = rb_simple_search(&cache->rb_root, + 
reloc_root->commit_root->start); if (rb_node) { - node = rb_entry(rb_node, struct backref_node, + node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); BUG_ON(node->detached); } @@ -1276,12 +570,11 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, if (!node) return 0; - new_node = alloc_backref_node(cache); + new_node = btrfs_backref_alloc_node(cache, dest->node->start, + node->level); if (!new_node) return -ENOMEM; - new_node->bytenr = dest->node->start; - new_node->level = node->level; new_node->lowest = node->lowest; new_node->checked = 1; new_node->root = btrfs_grab_root(dest); @@ -1289,23 +582,21 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, if (!node->lowest) { list_for_each_entry(edge, &node->lower, list[UPPER]) { - new_edge = alloc_backref_edge(cache); + new_edge = btrfs_backref_alloc_edge(cache); if (!new_edge) goto fail; - new_edge->node[UPPER] = new_node; - new_edge->node[LOWER] = edge->node[LOWER]; - list_add_tail(&new_edge->list[UPPER], - &new_node->lower); + btrfs_backref_link_edge(new_edge, edge->node[LOWER], + new_node, LINK_UPPER); } } else { list_add_tail(&new_node->lower, &cache->leaves); } - rb_node = tree_insert(&cache->rb_root, new_node->bytenr, - &new_node->rb_node); + rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr, + &new_node->rb_node); if (rb_node) - backref_tree_panic(rb_node, -EEXIST, new_node->bytenr); + btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST); if (!new_node->lowest) { list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { @@ -1317,11 +608,11 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, fail: while (!list_empty(&new_node->lower)) { new_edge = list_entry(new_node->lower.next, - struct backref_edge, list[UPPER]); + struct btrfs_backref_edge, list[UPPER]); list_del(&new_edge->list[UPPER]); - free_backref_edge(cache, new_edge); + btrfs_backref_free_edge(cache, new_edge); } - free_backref_node(cache, new_node); + btrfs_backref_free_node(cache, new_node); return -ENOMEM; } @@ -1343,8 +634,8 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) node->data = root; spin_lock(&rc->reloc_root_tree.lock); - rb_node = tree_insert(&rc->reloc_root_tree.rb_root, - node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, + node->bytenr, &node->rb_node); spin_unlock(&rc->reloc_root_tree.lock); if (rb_node) { btrfs_panic(fs_info, -EEXIST, @@ -1370,8 +661,8 @@ static void __del_reloc_root(struct btrfs_root *root) if (rc && root->node) { spin_lock(&rc->reloc_root_tree.lock); - rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->commit_root->start); + rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, + root->commit_root->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); @@ -1414,8 +705,8 @@ static int __update_reloc_root(struct btrfs_root *root) struct reloc_control *rc = fs_info->reloc_ctl; spin_lock(&rc->reloc_root_tree.lock); - rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->commit_root->start); + rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, + root->commit_root->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); @@ -1428,11 +719,11 @@ static int __update_reloc_root(struct btrfs_root *root) spin_lock(&rc->reloc_root_tree.lock); node->bytenr = root->node->start; - rb_node = tree_insert(&rc->reloc_root_tree.rb_root, - 
node->bytenr, &node->rb_node); + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, + node->bytenr, &node->rb_node); spin_unlock(&rc->reloc_root_tree.lock); if (rb_node) - backref_tree_panic(rb_node, -EEXIST, node->bytenr); + btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); return 0; } @@ -1505,7 +796,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key); BUG_ON(IS_ERR(reloc_root)); - set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state); + set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); reloc_root->last_trans = trans->transid; return reloc_root; } @@ -1679,14 +970,6 @@ again: return NULL; } -static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group) -{ - if (bytenr >= block_group->start && - bytenr < block_group->start + block_group->length) - return 1; - return 0; -} - /* * get new location of data */ @@ -1784,7 +1067,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans, num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); if (bytenr == 0) continue; - if (!in_block_group(bytenr, rc->block_group)) + if (!in_range(bytenr, rc->block_group->start, + rc->block_group->length)) continue; /* @@ -1940,7 +1224,7 @@ again: level = btrfs_header_level(parent); BUG_ON(level < lowest_level); - ret = btrfs_bin_search(parent, &key, level, &slot); + ret = btrfs_bin_search(parent, &key, &slot); if (ret < 0) break; if (ret && slot > 0) @@ -2560,7 +1844,8 @@ again: struct btrfs_root, root_list); list_del_init(&reloc_root->root_list); - root = read_fs_root(fs_info, reloc_root->root_key.offset); + root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, + false); BUG_ON(IS_ERR(root)); BUG_ON(root->reloc_root != reloc_root); @@ -2588,13 +1873,10 @@ again: static noinline_for_stack void free_reloc_roots(struct list_head *list) { - struct btrfs_root *reloc_root; + struct btrfs_root *reloc_root, *tmp; - while (!list_empty(list)) { - reloc_root = list_entry(list->next, struct btrfs_root, - root_list); + list_for_each_entry_safe(reloc_root, tmp, list, root_list) __del_reloc_root(reloc_root); - } } static noinline_for_stack @@ -2624,12 +1906,11 @@ again: reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); + root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, + false); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - root = read_fs_root(fs_info, - reloc_root->root_key.offset); BUG_ON(IS_ERR(root)); BUG_ON(root->reloc_root != reloc_root); - ret = merge_reloc_root(rc, root); btrfs_put_root(root); if (ret) { @@ -2639,6 +1920,16 @@ again: goto out; } } else { + if (!IS_ERR(root)) { + if (root->reloc_root == reloc_root) { + root->reloc_root = NULL; + btrfs_put_root(reloc_root); + } + clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, + &root->state); + btrfs_put_root(root); + } + list_del_init(&reloc_root->root_list); /* Don't forget to queue this reloc root for cleanup */ list_add_tail(&reloc_root->reloc_dirty_list, @@ -2653,15 +1944,13 @@ again: out: if (ret) { btrfs_handle_fs_error(fs_info, ret, NULL); - if (!list_empty(&reloc_roots)) - free_reloc_roots(&reloc_roots); + free_reloc_roots(&reloc_roots); /* new reloc root may be added */ mutex_lock(&fs_info->reloc_mutex); list_splice_init(&rc->reloc_roots, &reloc_roots); mutex_unlock(&fs_info->reloc_mutex); - if (!list_empty(&reloc_roots)) - free_reloc_roots(&reloc_roots); + free_reloc_roots(&reloc_roots); } /* @@ -2702,7 +1991,7 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, if 
(reloc_root->last_trans == trans->transid) return 0; - root = read_fs_root(fs_info, reloc_root->root_key.offset); + root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); BUG_ON(IS_ERR(root)); BUG_ON(root->reloc_root != reloc_root); ret = btrfs_record_root_in_trans(trans, root); @@ -2714,10 +2003,10 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, static noinline_for_stack struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, struct reloc_control *rc, - struct backref_node *node, - struct backref_edge *edges[]) + struct btrfs_backref_node *node, + struct btrfs_backref_edge *edges[]) { - struct backref_node *next; + struct btrfs_backref_node *next; struct btrfs_root *root; int index = 0; @@ -2727,7 +2016,7 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, next = walk_up_backref(next, edges, &index); root = next->root; BUG_ON(!root); - BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state)); + BUG_ON(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)); if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { record_reloc_root_in_trans(trans, root); @@ -2746,7 +2035,7 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, ASSERT(next->root); list_add_tail(&next->list, &rc->backref_cache.changed); - __mark_block_processed(rc, next); + mark_block_processed(rc, next); break; } @@ -2771,18 +2060,21 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, } /* - * select a tree root for relocation. return NULL if the block - * is reference counted. we should use do_relocation() in this - * case. return a tree root pointer if the block isn't reference - * counted. return -ENOENT if the block is root of reloc tree. + * Select a tree root for relocation. + * + * Return NULL if the block is not shareable. We should use do_relocation() in + * this case. + * + * Return a tree root pointer if the block is shareable. + * Return -ENOENT if the block is root of reloc tree. 
*/ static noinline_for_stack -struct btrfs_root *select_one_root(struct backref_node *node) +struct btrfs_root *select_one_root(struct btrfs_backref_node *node) { - struct backref_node *next; + struct btrfs_backref_node *next; struct btrfs_root *root; struct btrfs_root *fs_root = NULL; - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; int index = 0; next = node; @@ -2792,8 +2084,8 @@ struct btrfs_root *select_one_root(struct backref_node *node) root = next->root; BUG_ON(!root); - /* no other choice for non-references counted tree */ - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + /* No other choice for non-shareable tree */ + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return root; if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) @@ -2814,12 +2106,12 @@ struct btrfs_root *select_one_root(struct backref_node *node) static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc, - struct backref_node *node, int reserve) + struct btrfs_backref_node *node, int reserve) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - struct backref_node *next = node; - struct backref_edge *edge; - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; + struct btrfs_backref_node *next = node; + struct btrfs_backref_edge *edge; + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; u64 num_bytes = 0; int index = 0; @@ -2837,7 +2129,7 @@ u64 calcu_metadata_size(struct reloc_control *rc, break; edge = list_entry(next->upper.next, - struct backref_edge, list[LOWER]); + struct btrfs_backref_edge, list[LOWER]); edges[index++] = edge; next = edge->node[UPPER]; } @@ -2848,7 +2140,7 @@ u64 calcu_metadata_size(struct reloc_control *rc, static int reserve_metadata_space(struct btrfs_trans_handle *trans, struct reloc_control *rc, - struct backref_node *node) + struct btrfs_backref_node *node) { struct btrfs_root *root = rc->extent_root; struct btrfs_fs_info *fs_info = root->fs_info; @@ -2896,14 +2188,14 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans, */ static int do_relocation(struct btrfs_trans_handle *trans, struct reloc_control *rc, - struct backref_node *node, + struct btrfs_backref_node *node, struct btrfs_key *key, struct btrfs_path *path, int lowest) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - struct backref_node *upper; - struct backref_edge *edge; - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; + struct btrfs_backref_node *upper; + struct btrfs_backref_edge *edge; + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; struct btrfs_root *root; struct extent_buffer *eb; u32 blocksize; @@ -2929,8 +2221,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, if (upper->eb && !upper->locked) { if (!lowest) { - ret = btrfs_bin_search(upper->eb, key, - upper->level, &slot); + ret = btrfs_bin_search(upper->eb, key, &slot); if (ret < 0) { err = ret; goto next; @@ -2940,7 +2231,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, if (node->eb->start == bytenr) goto next; } - drop_node_buffer(upper); + btrfs_backref_drop_node_buffer(upper); } if (!upper->eb) { @@ -2968,8 +2259,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, slot = path->slots[upper->level]; btrfs_release_path(path); } else { - ret = btrfs_bin_search(upper->eb, key, upper->level, - &slot); + ret = btrfs_bin_search(upper->eb, key, &slot); if (ret < 0) { err = ret; goto next; @@ -3039,15 +2329,15 @@ static int do_relocation(struct btrfs_trans_handle *trans, } next: if 
(!upper->pending) - drop_node_buffer(upper); + btrfs_backref_drop_node_buffer(upper); else - unlock_node_buffer(upper); + btrfs_backref_unlock_node_buffer(upper); if (err) break; } if (!err && node->pending) { - drop_node_buffer(node); + btrfs_backref_drop_node_buffer(node); list_move_tail(&node->list, &rc->backref_cache.changed); node->pending = 0; } @@ -3059,7 +2349,7 @@ next: static int link_to_upper(struct btrfs_trans_handle *trans, struct reloc_control *rc, - struct backref_node *node, + struct btrfs_backref_node *node, struct btrfs_path *path) { struct btrfs_key key; @@ -3073,15 +2363,15 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans, struct btrfs_path *path, int err) { LIST_HEAD(list); - struct backref_cache *cache = &rc->backref_cache; - struct backref_node *node; + struct btrfs_backref_cache *cache = &rc->backref_cache; + struct btrfs_backref_node *node; int level; int ret; for (level = 0; level < BTRFS_MAX_LEVEL; level++) { while (!list_empty(&cache->pending[level])) { node = list_entry(cache->pending[level].next, - struct backref_node, list); + struct btrfs_backref_node, list); list_move_tail(&node->list, &list); BUG_ON(!node->pending); @@ -3096,35 +2386,16 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans, return err; } -static void mark_block_processed(struct reloc_control *rc, - u64 bytenr, u32 blocksize) -{ - set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, - EXTENT_DIRTY); -} - -static void __mark_block_processed(struct reloc_control *rc, - struct backref_node *node) -{ - u32 blocksize; - if (node->level == 0 || - in_block_group(node->bytenr, rc->block_group)) { - blocksize = rc->extent_root->fs_info->nodesize; - mark_block_processed(rc, node->bytenr, blocksize); - } - node->processed = 1; -} - /* * mark a block and all blocks directly/indirectly reference the block * as processed. 
*/ static void update_processed_blocks(struct reloc_control *rc, - struct backref_node *node) + struct btrfs_backref_node *node) { - struct backref_node *next = node; - struct backref_edge *edge; - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; + struct btrfs_backref_node *next = node; + struct btrfs_backref_edge *edge; + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; int index = 0; while (next) { @@ -3133,13 +2404,13 @@ static void update_processed_blocks(struct reloc_control *rc, if (next->processed) break; - __mark_block_processed(rc, next); + mark_block_processed(rc, next); if (list_empty(&next->upper)) break; edge = list_entry(next->upper.next, - struct backref_edge, list[LOWER]); + struct btrfs_backref_edge, list[LOWER]); edges[index++] = edge; next = edge->node[UPPER]; } @@ -3184,7 +2455,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info, */ static int relocate_tree_block(struct btrfs_trans_handle *trans, struct reloc_control *rc, - struct backref_node *node, + struct btrfs_backref_node *node, struct btrfs_key *key, struct btrfs_path *path) { @@ -3210,7 +2481,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, } if (root) { - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { BUG_ON(node->new_bytenr); BUG_ON(!list_empty(&node->list)); btrfs_record_root_in_trans(trans, root); @@ -3234,7 +2505,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, } out: if (ret || node->level == 0 || node->cowonly) - remove_backref_node(&rc->backref_cache, node); + btrfs_backref_cleanup_node(&rc->backref_cache, node); return ret; } @@ -3246,7 +2517,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, struct reloc_control *rc, struct rb_root *blocks) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - struct backref_node *node; + struct btrfs_backref_node *node; struct btrfs_path *path; struct tree_block *block; struct tree_block *next; @@ -3613,9 +2884,10 @@ static int add_tree_block(struct reloc_control *rc, block->level = level; block->key_ready = 0; - rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); + rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); if (rb_node) - backref_tree_panic(rb_node, -EEXIST, block->bytenr); + btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, + -EEXIST); return 0; } @@ -3636,7 +2908,7 @@ static int __add_tree_block(struct reloc_control *rc, if (tree_block_processed(bytenr, rc)) return 0; - if (tree_search(blocks, bytenr)) + if (rb_simple_search(blocks, bytenr)) return 0; path = btrfs_alloc_path(); @@ -3698,7 +2970,6 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, struct inode *inode, u64 ino) { - struct btrfs_key key; struct btrfs_root *root = fs_info->tree_root; struct btrfs_trans_handle *trans; int ret = 0; @@ -3706,11 +2977,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, if (inode) goto truncate; - key.objectid = ino; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - - inode = btrfs_iget(fs_info->sb, &key, root); + inode = btrfs_iget(fs_info->sb, ino, root); if (IS_ERR(inode)) return -ENOENT; @@ -4122,7 +3389,7 @@ restart: rc->create_reloc_tree = 0; set_reloc_control(rc); - backref_cache_cleanup(&rc->backref_cache); + btrfs_backref_release_cache(&rc->backref_cache); btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); /* @@ -4198,14 +3465,10 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, struct inode *inode = NULL; 
struct btrfs_trans_handle *trans; struct btrfs_root *root; - struct btrfs_key key; u64 objectid; int err = 0; - root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); - if (IS_ERR(root)) - return ERR_CAST(root); - + root = btrfs_grab_root(fs_info->data_reloc_root); trans = btrfs_start_transaction(root, 6); if (IS_ERR(trans)) { btrfs_put_root(root); @@ -4219,10 +3482,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, err = __insert_orphan_inode(trans, root, objectid); BUG_ON(err); - key.objectid = objectid; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root); + inode = btrfs_iget(fs_info->sb, objectid, root); BUG_ON(IS_ERR(inode)); BTRFS_I(inode)->index_cnt = group->start; @@ -4249,7 +3509,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) INIT_LIST_HEAD(&rc->reloc_roots); INIT_LIST_HEAD(&rc->dirty_subvol_roots); - backref_cache_init(&rc->backref_cache); + btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); mapping_tree_init(&rc->reloc_root_tree); extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS, NULL); @@ -4494,12 +3754,12 @@ int btrfs_recover_relocation(struct btrfs_root *root) goto out; } - set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state); + set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); list_add(&reloc_root->root_list, &reloc_roots); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - fs_root = read_fs_root(fs_info, - reloc_root->root_key.offset); + fs_root = btrfs_get_fs_root(fs_info, + reloc_root->root_key.offset, false); if (IS_ERR(fs_root)) { ret = PTR_ERR(fs_root); if (ret != -ENOENT) { @@ -4555,7 +3815,8 @@ int btrfs_recover_relocation(struct btrfs_root *root) continue; } - fs_root = read_fs_root(fs_info, reloc_root->root_key.offset); + fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, + false); if (IS_ERR(fs_root)) { err = PTR_ERR(fs_root); list_add_tail(&reloc_root->root_list, &reloc_roots); @@ -4591,20 +3852,16 @@ out_unset: unset_reloc_control(rc); free_reloc_control(rc); out: - if (!list_empty(&reloc_roots)) - free_reloc_roots(&reloc_roots); + free_reloc_roots(&reloc_roots); btrfs_free_path(path); if (err == 0) { /* cleanup orphan inode in data relocation tree */ - fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); - if (IS_ERR(fs_root)) { - err = PTR_ERR(fs_root); - } else { - err = btrfs_orphan_cleanup(fs_root); - btrfs_put_root(fs_root); - } + fs_root = btrfs_grab_root(fs_info->data_reloc_root); + ASSERT(fs_root); + err = btrfs_orphan_cleanup(fs_root); + btrfs_put_root(fs_root); } return err; } @@ -4666,7 +3923,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = root->fs_info; struct reloc_control *rc; - struct backref_node *node; + struct btrfs_backref_node *node; int first_cow = 0; int level; int ret = 0; @@ -4691,7 +3948,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, BUG_ON(node->bytenr != buf->start && node->new_bytenr != buf->start); - drop_node_buffer(node); + btrfs_backref_drop_node_buffer(node); atomic_inc(&cow->refs); node->eb = cow; node->new_bytenr = cow->start; @@ -4703,7 +3960,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, } if (first_cow) - __mark_block_processed(rc, node); + mark_block_processed(rc, node); if (first_cow && level > 0) rc->nodes_relocated += buf->len; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 668f22844017..c89697486366 100644 --- a/fs/btrfs/root-tree.c +++ 
b/fs/btrfs/root-tree.c @@ -210,7 +210,6 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info) struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_key key; - struct btrfs_key root_key; struct btrfs_root *root; int err = 0; int ret; @@ -223,10 +222,9 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info) key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = 0; - root_key.type = BTRFS_ROOT_ITEM_KEY; - root_key.offset = (u64)-1; - while (1) { + u64 root_objectid; + ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); if (ret < 0) { err = ret; @@ -250,10 +248,10 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info) key.type != BTRFS_ORPHAN_ITEM_KEY) break; - root_key.objectid = key.offset; + root_objectid = key.offset; key.offset++; - root = btrfs_get_fs_root(fs_info, &root_key, false); + root = btrfs_get_fs_root(fs_info, root_objectid, false); err = PTR_ERR_OR_ZERO(root); if (err && err != -ENOENT) { break; @@ -270,7 +268,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info) break; } err = btrfs_del_orphan_item(trans, tree_root, - root_key.objectid); + root_objectid); btrfs_end_transaction(trans); if (err) { btrfs_handle_fs_error(fs_info, err, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index adaf8ab694d5..016a025e36c7 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -647,13 +647,9 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, struct btrfs_fs_info *fs_info = swarn->dev->fs_info; struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; - struct btrfs_key root_key; struct btrfs_key key; - root_key.objectid = root; - root_key.type = BTRFS_ROOT_ITEM_KEY; - root_key.offset = (u64)-1; - local_root = btrfs_get_fs_root(fs_info, &root_key, true); + local_root = btrfs_get_fs_root(fs_info, root, true); if (IS_ERR(local_root)) { ret = PTR_ERR(local_root); goto err; @@ -3046,7 +3042,8 @@ out: static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *scrub_dev, - int num, u64 base, u64 length) + int num, u64 base, u64 length, + struct btrfs_block_group *cache) { struct btrfs_path *path, *ppath; struct btrfs_fs_info *fs_info = sctx->fs_info; @@ -3284,6 +3281,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, break; } + /* + * If our block group was removed in the meanwhile, just + * stop scrubbing since there is no point in continuing. + * Continuing would prevent reusing its device extents + * for new block groups for a long time. 
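To make the check this comment introduces (the spin_lock'ed cache->removed test that follows) and the freeze/unfreeze pairing added further down in scrub_enumerate_chunks() easier to follow, a condensed sketch; the helper name is invented and error handling is trimmed:

/*
 * Invented helper, condensing the pattern the scrub hunks add: bail out
 * if the block group raced with removal, otherwise freeze it so its
 * logical address and device extents cannot be reused while we scrub.
 */
static bool scrub_pin_block_group(struct btrfs_block_group *cache)
{
	spin_lock(&cache->lock);
	if (cache->removed) {
		spin_unlock(&cache->lock);
		return false;		/* nothing left to scrub, skip it */
	}
	btrfs_freeze_block_group(cache);
	spin_unlock(&cache->lock);
	return true;			/* caller must btrfs_unfreeze_block_group() */
}

The matching btrfs_unfreeze_block_group() call has to run on every exit path, which is why the hunks below add it to both the error and the completion paths.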
+ */ + spin_lock(&cache->lock); + if (cache->removed) { + spin_unlock(&cache->lock); + ret = 0; + goto out; + } + spin_unlock(&cache->lock); + extent = btrfs_item_ptr(l, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(l, extent); @@ -3328,13 +3339,14 @@ again: &extent_dev, &extent_mirror_num); - ret = btrfs_lookup_csums_range(csum_root, - extent_logical, - extent_logical + - extent_len - 1, - &sctx->csum_list, 1); - if (ret) - goto out; + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = btrfs_lookup_csums_range(csum_root, + extent_logical, + extent_logical + extent_len - 1, + &sctx->csum_list, 1); + if (ret) + goto out; + } ret = scrub_extent(sctx, map, extent_logical, extent_len, extent_physical, extent_dev, flags, @@ -3457,7 +3469,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, if (map->stripes[i].dev->bdev == scrub_dev->bdev && map->stripes[i].physical == dev_offset) { ret = scrub_stripe(sctx, map, scrub_dev, i, - chunk_offset, length); + chunk_offset, length, cache); if (ret) goto out; } @@ -3555,6 +3567,23 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, goto skip; /* + * Make sure that while we are scrubbing the corresponding block + * group doesn't get its logical address and its device extents + * reused for another block group, which can possibly be of a + * different type and different profile. We do this to prevent + * false error detections and crashes due to bogus attempts to + * repair extents. + */ + spin_lock(&cache->lock); + if (cache->removed) { + spin_unlock(&cache->lock); + btrfs_put_block_group(cache); + goto skip; + } + btrfs_freeze_block_group(cache); + spin_unlock(&cache->lock); + + /* * we need call btrfs_inc_block_group_ro() with scrubs_paused, * to avoid deadlock caused by: * btrfs_inc_block_group_ro() @@ -3609,6 +3638,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, } else { btrfs_warn(fs_info, "failed setting block group ro: %d", ret); + btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); scrub_pause_off(fs_info); break; @@ -3695,6 +3725,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, spin_unlock(&cache->lock); } + btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); if (ret) break; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index c5f41bd86765..d9813a5b075a 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -23,6 +23,7 @@ #include "btrfs_inode.h" #include "transaction.h" #include "compression.h" +#include "xattr.h" /* * Maximum number of references an extent can have in order for us to attempt to @@ -4545,6 +4546,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, struct fs_path *p; struct posix_acl_xattr_header dummy_acl; + /* Capabilities are emitted by finish_inode_if_needed */ + if (!strncmp(name, XATTR_NAME_CAPS, name_len)) + return 0; + p = fs_path_alloc(); if (!p) return -ENOMEM; @@ -4801,17 +4806,12 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) struct inode *inode; struct page *page; char *addr; - struct btrfs_key key; pgoff_t index = offset >> PAGE_SHIFT; pgoff_t last_index; unsigned pg_offset = offset_in_page(offset); ssize_t ret = 0; - key.objectid = sctx->cur_ino; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - - inode = btrfs_iget(fs_info->sb, &key, root); + inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root); if (IS_ERR(inode)) return PTR_ERR(inode); @@ -5107,6 +5107,64 @@ static int send_extent_data(struct send_ctx *sctx, return 0; } +/* + * Search for a capability xattr related to sctx->cur_ino. 
If the capability is + * found, call send_set_xattr function to emit it. + * + * Return 0 if there isn't a capability, or when the capability was emitted + * successfully, or < 0 if an error occurred. + */ +static int send_capabilities(struct send_ctx *sctx) +{ + struct fs_path *fspath = NULL; + struct btrfs_path *path; + struct btrfs_dir_item *di; + struct extent_buffer *leaf; + unsigned long data_ptr; + char *buf = NULL; + int buf_len; + int ret = 0; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, + XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); + if (!di) { + /* There is no xattr for this inode */ + goto out; + } else if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; + } + + leaf = path->nodes[0]; + buf_len = btrfs_dir_data_len(leaf, di); + + fspath = fs_path_alloc(); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!fspath || !buf) { + ret = -ENOMEM; + goto out; + } + + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); + if (ret < 0) + goto out; + + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); + read_extent_buffer(leaf, buf, data_ptr, buf_len); + + ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, + strlen(XATTR_NAME_CAPS), buf, buf_len); +out: + kfree(buf); + fs_path_free(fspath); + btrfs_free_path(path); + return ret; +} + static int clone_range(struct send_ctx *sctx, struct clone_root *clone_root, const u64 disk_byte, @@ -5972,6 +6030,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) goto out; } + ret = send_capabilities(sctx); + if (ret < 0) + goto out; + /* * If other directory inodes depended on our current directory * inode's move/rename, now do their move/rename operations. @@ -7021,7 +7083,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root; struct btrfs_fs_info *fs_info = send_root->fs_info; struct btrfs_root *clone_root; - struct btrfs_key key; struct send_ctx *sctx = NULL; u32 i; u64 *clone_sources_tmp = NULL; @@ -7065,13 +7126,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) goto out; } - if (!access_ok(arg->clone_sources, - sizeof(*arg->clone_sources) * - arg->clone_sources_count)) { - ret = -EFAULT; - goto out; - } - if (arg->flags & ~BTRFS_SEND_FLAG_MASK) { ret = -EINVAL; goto out; @@ -7150,11 +7204,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) } for (i = 0; i < arg->clone_sources_count; i++) { - key.objectid = clone_sources_tmp[i]; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - - clone_root = btrfs_get_fs_root(fs_info, &key, true); + clone_root = btrfs_get_fs_root(fs_info, + clone_sources_tmp[i], true); if (IS_ERR(clone_root)) { ret = PTR_ERR(clone_root); goto out; @@ -7185,11 +7236,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) } if (arg->parent_root) { - key.objectid = arg->parent_root; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - - sctx->parent_root = btrfs_get_fs_root(fs_info, &key, true); + sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root, + true); if (IS_ERR(sctx->parent_root)) { ret = PTR_ERR(sctx->parent_root); goto out; diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index ff17a4420358..41ee88633769 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -626,6 +626,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, 
struct reserve_ticket *ticket = NULL; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; + struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; struct btrfs_trans_handle *trans; u64 bytes_needed; u64 reclaim_bytes = 0; @@ -688,6 +689,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, spin_lock(&delayed_refs_rsv->lock); reclaim_bytes += delayed_refs_rsv->reserved; spin_unlock(&delayed_refs_rsv->lock); + + spin_lock(&trans_rsv->lock); + reclaim_bytes += trans_rsv->reserved; + spin_unlock(&trans_rsv->lock); + if (reclaim_bytes >= bytes_needed) goto commit; bytes_needed -= reclaim_bytes; @@ -856,6 +862,34 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); } +static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, + struct reserve_ticket *ticket) +{ + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; + u64 min_bytes; + + if (global_rsv->space_info != space_info) + return false; + + spin_lock(&global_rsv->lock); + min_bytes = div_factor(global_rsv->size, 1); + if (global_rsv->reserved < min_bytes + ticket->bytes) { + spin_unlock(&global_rsv->lock); + return false; + } + global_rsv->reserved -= ticket->bytes; + ticket->bytes = 0; + list_del_init(&ticket->list); + wake_up(&ticket->wait); + space_info->tickets_id++; + if (global_rsv->reserved < global_rsv->size) + global_rsv->full = 0; + spin_unlock(&global_rsv->lock); + + return true; +} + /* * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets * @fs_info - fs_info for this fs @@ -888,6 +922,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, ticket = list_first_entry(&space_info->tickets, struct reserve_ticket, list); + if (ticket->steal && + steal_from_global_rsv(fs_info, space_info, ticket)) + return true; + /* * may_commit_transaction will avoid committing the transaction * if it doesn't feel like the space reclaimed by the commit @@ -1104,6 +1142,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, switch (flush) { case BTRFS_RESERVE_FLUSH_ALL: + case BTRFS_RESERVE_FLUSH_ALL_STEAL: wait_reserve_ticket(fs_info, space_info, ticket); break; case BTRFS_RESERVE_FLUSH_LIMIT: @@ -1125,11 +1164,17 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, ret = ticket->error; if (ticket->bytes || ticket->error) { /* - * Need to delete here for priority tickets. For regular tickets - * either the async reclaim job deletes the ticket from the list - * or we delete it ourselves at wait_reserve_ticket(). + * We were a priority ticket, so we need to delete ourselves + * from the list. Because we could have other priority tickets + * behind us that require less space, run + * btrfs_try_granting_tickets() to see if their reservations can + * now be made. */ - remove_ticket(space_info, ticket); + if (!list_empty(&ticket->list)) { + remove_ticket(space_info, ticket); + btrfs_try_granting_tickets(fs_info, space_info); + } + if (!ret) ret = -ENOSPC; } @@ -1145,6 +1190,16 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, return ret; } +/* + * This returns true if this flush state will go through the ordinary flushing + * code. 
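For scale, a quick worked example of the steal threshold above: div_factor(n, 1) in btrfs evaluates to n / 10, so with a 512 MiB global reserve min_bytes is roughly 51 MiB; a 4 MiB ticket is therefore satisfied from the reserve only while its reserved space is still at least ~55 MiB, and below that steal_from_global_rsv() declines and the ticket fails with -ENOSPC as before.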
+ */ +static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) +{ + return (flush == BTRFS_RESERVE_FLUSH_ALL) || + (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); +} + /** * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space * @root - the root we're allocating for @@ -1175,8 +1230,17 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, spin_lock(&space_info->lock); ret = -ENOSPC; used = btrfs_space_info_used(space_info, true); - pending_tickets = !list_empty(&space_info->tickets) || - !list_empty(&space_info->priority_tickets); + + /* + * We don't want NO_FLUSH allocations to jump everybody, they can + * generally handle ENOSPC in a different way, so treat them the same as + * normal flushers when it comes to skipping pending tickets. + */ + if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) + pending_tickets = !list_empty(&space_info->tickets) || + !list_empty(&space_info->priority_tickets); + else + pending_tickets = !list_empty(&space_info->priority_tickets); /* * Carry on if we have enough space (short-circuit) OR call @@ -1198,12 +1262,13 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, * the list and we will do our own flushing further down. */ if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { - ASSERT(space_info->reclaim_size >= 0); ticket.bytes = orig_bytes; ticket.error = 0; space_info->reclaim_size += ticket.bytes; init_waitqueue_head(&ticket.wait); - if (flush == BTRFS_RESERVE_FLUSH_ALL) { + ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); + if (flush == BTRFS_RESERVE_FLUSH_ALL || + flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { list_add_tail(&ticket.list, &space_info->tickets); if (!space_info->flush) { space_info->flush = 1; diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 0a5001ef1481..c3c64019950a 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -78,6 +78,7 @@ struct btrfs_space_info { struct reserve_ticket { u64 bytes; int error; + bool steal; struct list_head list; wait_queue_head_t wait; }; diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index 73f7987143df..079b059818e9 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -17,151 +17,152 @@ static inline void put_unaligned_le8(u8 val, void *p) *(u8 *)p = val; } +static bool check_setget_bounds(const struct extent_buffer *eb, + const void *ptr, unsigned off, int size) +{ + const unsigned long member_offset = (unsigned long)ptr + off; + + if (member_offset > eb->len) { + btrfs_warn(eb->fs_info, + "bad eb member start: ptr 0x%lx start %llu member offset %lu size %d", + (unsigned long)ptr, eb->start, member_offset, size); + return false; + } + if (member_offset + size > eb->len) { + btrfs_warn(eb->fs_info, + "bad eb member end: ptr 0x%lx start %llu member offset %lu size %d", + (unsigned long)ptr, eb->start, member_offset, size); + return false; + } + + return true; +} + /* - * this is some deeply nasty code. + * Macro templates that define helpers to read/write extent buffer data of a + * given size, that are also used via ctree.h for access to item members by + * specialized helpers. * - * The end result is that anyone who #includes ctree.h gets a - * declaration for the btrfs_set_foo functions and btrfs_foo functions, - * which are wrappers of btrfs_set_token_#bits functions and - * btrfs_get_token_#bits functions, which are defined in this file. 
+ * Generic helpers: + * - btrfs_set_8 (for 8/16/32/64) + * - btrfs_get_8 (for 8/16/32/64) * - * These setget functions do all the extent_buffer related mapping - * required to efficiently read and write specific fields in the extent - * buffers. Every pointer to metadata items in btrfs is really just - * an unsigned long offset into the extent buffer which has been - * cast to a specific type. This gives us all the gcc type checking. + * Generic helpers with a token (cached address of the most recently accessed + * page): + * - btrfs_set_token_8 (for 8/16/32/64) + * - btrfs_get_token_8 (for 8/16/32/64) * - * The extent buffer api is used to do the page spanning work required to - * have a metadata blocksize different from the page size. + * The set/get functions handle data spanning two pages transparently, in case + * metadata block size is larger than page. Every pointer to metadata items is + * an offset into the extent buffer page array, cast to a specific type. This + * gives us all the type checking. * - * There are 2 variants defined, one with a token pointer and one without. + * The extent buffer pages stored in the array pages do not form a contiguous + * phyusical range, but the API functions assume the linear offset to the range + * from 0 to metadata node size. */ #define DEFINE_BTRFS_SETGET_BITS(bits) \ -u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ - const void *ptr, unsigned long off, \ - struct btrfs_map_token *token) \ +u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ + const void *ptr, unsigned long off) \ { \ - unsigned long part_offset = (unsigned long)ptr; \ - unsigned long offset = part_offset + off; \ - void *p; \ - int err; \ - char *kaddr; \ - unsigned long map_start; \ - unsigned long map_len; \ - int size = sizeof(u##bits); \ - u##bits res; \ + const unsigned long member_offset = (unsigned long)ptr + off; \ + const unsigned long idx = member_offset >> PAGE_SHIFT; \ + const unsigned long oip = offset_in_page(member_offset); \ + const int size = sizeof(u##bits); \ + u8 lebytes[sizeof(u##bits)]; \ + const int part = PAGE_SIZE - oip; \ \ ASSERT(token); \ - ASSERT(token->eb == eb); \ - \ - if (token->kaddr && token->offset <= offset && \ - (token->offset + PAGE_SIZE >= offset + size)) { \ - kaddr = token->kaddr; \ - p = kaddr + part_offset - token->offset; \ - res = get_unaligned_le##bits(p + off); \ - return res; \ + ASSERT(token->kaddr); \ + ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ + if (token->offset <= member_offset && \ + member_offset + size <= token->offset + PAGE_SIZE) { \ + return get_unaligned_le##bits(token->kaddr + oip); \ } \ - err = map_private_extent_buffer(eb, offset, size, \ - &kaddr, &map_start, &map_len); \ - if (err) { \ - __le##bits leres; \ + token->kaddr = page_address(token->eb->pages[idx]); \ + token->offset = idx << PAGE_SHIFT; \ + if (oip + size <= PAGE_SIZE) \ + return get_unaligned_le##bits(token->kaddr + oip); \ \ - read_extent_buffer(eb, &leres, offset, size); \ - return le##bits##_to_cpu(leres); \ - } \ - p = kaddr + part_offset - map_start; \ - res = get_unaligned_le##bits(p + off); \ - token->kaddr = kaddr; \ - token->offset = map_start; \ - return res; \ + memcpy(lebytes, token->kaddr + oip, part); \ + token->kaddr = page_address(token->eb->pages[idx + 1]); \ + token->offset = (idx + 1) << PAGE_SHIFT; \ + memcpy(lebytes + part, token->kaddr, size - part); \ + return get_unaligned_le##bits(lebytes); \ } \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ const void *ptr, 
unsigned long off) \ { \ - unsigned long part_offset = (unsigned long)ptr; \ - unsigned long offset = part_offset + off; \ - void *p; \ - int err; \ - char *kaddr; \ - unsigned long map_start; \ - unsigned long map_len; \ - int size = sizeof(u##bits); \ - u##bits res; \ + const unsigned long member_offset = (unsigned long)ptr + off; \ + const unsigned long oip = offset_in_page(member_offset); \ + const unsigned long idx = member_offset >> PAGE_SHIFT; \ + char *kaddr = page_address(eb->pages[idx]); \ + const int size = sizeof(u##bits); \ + const int part = PAGE_SIZE - oip; \ + u8 lebytes[sizeof(u##bits)]; \ \ - err = map_private_extent_buffer(eb, offset, size, \ - &kaddr, &map_start, &map_len); \ - if (err) { \ - __le##bits leres; \ + ASSERT(check_setget_bounds(eb, ptr, off, size)); \ + if (oip + size <= PAGE_SIZE) \ + return get_unaligned_le##bits(kaddr + oip); \ \ - read_extent_buffer(eb, &leres, offset, size); \ - return le##bits##_to_cpu(leres); \ - } \ - p = kaddr + part_offset - map_start; \ - res = get_unaligned_le##bits(p + off); \ - return res; \ + memcpy(lebytes, kaddr + oip, part); \ + kaddr = page_address(eb->pages[idx + 1]); \ + memcpy(lebytes + part, kaddr, size - part); \ + return get_unaligned_le##bits(lebytes); \ } \ -void btrfs_set_token_##bits(struct extent_buffer *eb, \ +void btrfs_set_token_##bits(struct btrfs_map_token *token, \ const void *ptr, unsigned long off, \ - u##bits val, \ - struct btrfs_map_token *token) \ + u##bits val) \ { \ - unsigned long part_offset = (unsigned long)ptr; \ - unsigned long offset = part_offset + off; \ - void *p; \ - int err; \ - char *kaddr; \ - unsigned long map_start; \ - unsigned long map_len; \ - int size = sizeof(u##bits); \ + const unsigned long member_offset = (unsigned long)ptr + off; \ + const unsigned long idx = member_offset >> PAGE_SHIFT; \ + const unsigned long oip = offset_in_page(member_offset); \ + const int size = sizeof(u##bits); \ + u8 lebytes[sizeof(u##bits)]; \ + const int part = PAGE_SIZE - oip; \ \ ASSERT(token); \ - ASSERT(token->eb == eb); \ - \ - if (token->kaddr && token->offset <= offset && \ - (token->offset + PAGE_SIZE >= offset + size)) { \ - kaddr = token->kaddr; \ - p = kaddr + part_offset - token->offset; \ - put_unaligned_le##bits(val, p + off); \ + ASSERT(token->kaddr); \ + ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ + if (token->offset <= member_offset && \ + member_offset + size <= token->offset + PAGE_SIZE) { \ + put_unaligned_le##bits(val, token->kaddr + oip); \ return; \ } \ - err = map_private_extent_buffer(eb, offset, size, \ - &kaddr, &map_start, &map_len); \ - if (err) { \ - __le##bits val2; \ - \ - val2 = cpu_to_le##bits(val); \ - write_extent_buffer(eb, &val2, offset, size); \ + token->kaddr = page_address(token->eb->pages[idx]); \ + token->offset = idx << PAGE_SHIFT; \ + if (oip + size <= PAGE_SIZE) { \ + put_unaligned_le##bits(val, token->kaddr + oip); \ return; \ } \ - p = kaddr + part_offset - map_start; \ - put_unaligned_le##bits(val, p + off); \ - token->kaddr = kaddr; \ - token->offset = map_start; \ + put_unaligned_le##bits(val, lebytes); \ + memcpy(token->kaddr + oip, lebytes, part); \ + token->kaddr = page_address(token->eb->pages[idx + 1]); \ + token->offset = (idx + 1) << PAGE_SHIFT; \ + memcpy(token->kaddr, lebytes + part, size - part); \ } \ -void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ +void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ unsigned long off, u##bits val) \ { \ - unsigned long part_offset = (unsigned long)ptr; \ - 
unsigned long offset = part_offset + off; \ - void *p; \ - int err; \ - char *kaddr; \ - unsigned long map_start; \ - unsigned long map_len; \ - int size = sizeof(u##bits); \ - \ - err = map_private_extent_buffer(eb, offset, size, \ - &kaddr, &map_start, &map_len); \ - if (err) { \ - __le##bits val2; \ + const unsigned long member_offset = (unsigned long)ptr + off; \ + const unsigned long oip = offset_in_page(member_offset); \ + const unsigned long idx = member_offset >> PAGE_SHIFT; \ + char *kaddr = page_address(eb->pages[idx]); \ + const int size = sizeof(u##bits); \ + const int part = PAGE_SIZE - oip; \ + u8 lebytes[sizeof(u##bits)]; \ \ - val2 = cpu_to_le##bits(val); \ - write_extent_buffer(eb, &val2, offset, size); \ + ASSERT(check_setget_bounds(eb, ptr, off, size)); \ + if (oip + size <= PAGE_SIZE) { \ + put_unaligned_le##bits(val, kaddr + oip); \ return; \ } \ - p = kaddr + part_offset - map_start; \ - put_unaligned_le##bits(val, p + off); \ + \ + put_unaligned_le##bits(val, lebytes); \ + memcpy(kaddr + oip, lebytes, part); \ + kaddr = page_address(eb->pages[idx + 1]); \ + memcpy(kaddr, lebytes + part, size - part); \ } DEFINE_BTRFS_SETGET_BITS(8) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 7932d8d07cff..bc73fd670702 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -72,23 +72,32 @@ const char * __attribute_const__ btrfs_decode_error(int errno) char *errstr = "unknown"; switch (errno) { - case -EIO: + case -ENOENT: /* -2 */ + errstr = "No such entry"; + break; + case -EIO: /* -5 */ errstr = "IO failure"; break; - case -ENOMEM: + case -ENOMEM: /* -12*/ errstr = "Out of memory"; break; - case -EROFS: - errstr = "Readonly filesystem"; - break; - case -EEXIST: + case -EEXIST: /* -17 */ errstr = "Object already exists"; break; - case -ENOSPC: + case -ENOSPC: /* -28 */ errstr = "No space left"; break; - case -ENOENT: - errstr = "No such entry"; + case -EROFS: /* -30 */ + errstr = "Readonly filesystem"; + break; + case -EOPNOTSUPP: /* -95 */ + errstr = "Operation not supported"; + break; + case -EUCLEAN: /* -117 */ + errstr = "Filesystem corrupted"; + break; + case -EDQUOT: /* -122 */ + errstr = "Quota exceeded"; break; } @@ -1093,10 +1102,7 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); btrfs_release_path(path); - key.objectid = subvol_objectid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - fs_root = btrfs_get_fs_root(fs_info, &key, true); + fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true); if (IS_ERR(fs_root)) { ret = PTR_ERR(fs_root); fs_root = NULL; @@ -1211,7 +1217,6 @@ static int btrfs_fill_super(struct super_block *sb, { struct inode *inode; struct btrfs_fs_info *fs_info = btrfs_sb(sb); - struct btrfs_key key; int err; sb->s_maxbytes = MAX_LFS_FILESIZE; @@ -1239,10 +1244,7 @@ static int btrfs_fill_super(struct super_block *sb, return err; } - key.objectid = BTRFS_FIRST_FREE_OBJECTID; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - inode = btrfs_iget(sb, &key, fs_info->fs_root); + inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail_close; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2d5498136e5e..b359d4b17658 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -21,6 +21,7 @@ #include "dev-replace.h" #include "qgroup.h" #include "block-group.h" +#include "space-info.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -141,7 +142,7 @@ void 
btrfs_put_transaction(struct btrfs_transaction *transaction) struct btrfs_block_group, bg_list); list_del_init(&cache->bg_list); - btrfs_put_block_group_trimming(cache); + btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); } WARN_ON(!list_empty(&transaction->dev_update_list)); @@ -348,10 +349,10 @@ loop: } /* - * this does all the record keeping required to make sure that a reference - * counted root is properly recorded in a given transaction. This is required - * to make sure the old root from before we joined the transaction is deleted - * when the transaction commits + * This does all the record keeping required to make sure that a shareable root + * is properly recorded in a given transaction. This is required to make sure + * the old root from before we joined the transaction is deleted when the + * transaction commits. */ static int record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -359,7 +360,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = root->fs_info; - if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && root->last_trans < trans->transid) || force) { WARN_ON(root == fs_info->extent_root); WARN_ON(!force && root->commit_root != root->node); @@ -438,7 +439,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = root->fs_info; - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return 0; /* @@ -503,7 +504,7 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root) struct btrfs_fs_info *fs_info = root->fs_info; if (!fs_info->reloc_ctl || - !test_bit(BTRFS_ROOT_REF_COWS, &root->state) || + !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || root->reloc_root) return false; @@ -523,6 +524,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, u64 num_bytes = 0; u64 qgroup_reserved = 0; bool reloc_reserved = false; + bool do_chunk_alloc = false; int ret; /* Send isn't supposed to start transactions. */ @@ -563,7 +565,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, * refill that amount for whatever is missing in the reserve. */ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - if (delayed_refs_rsv->full == 0) { + if (flush == BTRFS_RESERVE_FLUSH_ALL && + delayed_refs_rsv->full == 0) { delayed_refs_bytes = num_bytes; num_bytes <<= 1; } @@ -584,6 +587,9 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, delayed_refs_bytes); num_bytes -= delayed_refs_bytes; } + + if (rsv->space_info->force_alloc) + do_chunk_alloc = true; } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && !delayed_refs_rsv->full) { /* @@ -666,6 +672,19 @@ got_it: current->journal_info = h; /* + * If the space_info is marked ALLOC_FORCE then we'll get upgraded to + * ALLOC_FORCE the first run through, and then we won't allocate for + * anybody else who races in later. We don't care about the return + * value here. + */ + if (do_chunk_alloc && num_bytes) { + u64 flags = h->block_rsv->space_info->flags; + + btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), + CHUNK_ALLOC_NO_FORCE); + } + + /* * btrfs_record_root_in_trans() needs to alloc new extents, and may * call btrfs_join_transaction() while we're also starting a * transaction. 
@@ -699,43 +718,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, - unsigned int num_items, - int min_factor) + unsigned int num_items) { - struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_trans_handle *trans; - u64 num_bytes; - int ret; - - /* - * We have two callers: unlink and block group removal. The - * former should succeed even if we will temporarily exceed - * quota and the latter operates on the extent root so - * qgroup enforcement is ignored anyway. - */ - trans = start_transaction(root, num_items, TRANS_START, - BTRFS_RESERVE_FLUSH_ALL, false); - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) - return trans; - - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return trans; - - num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv, - num_bytes, min_factor); - if (ret) { - btrfs_end_transaction(trans); - return ERR_PTR(ret); - } - - trans->block_rsv = &fs_info->trans_block_rsv; - trans->bytes_reserved = num_bytes; - trace_btrfs_space_reservation(fs_info, "transaction", - trans->transid, num_bytes, 1); - - return trans; + return start_transaction(root, num_items, TRANS_START, + BTRFS_RESERVE_FLUSH_ALL_STEAL, false); } struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) @@ -1644,7 +1630,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, } key.offset = (u64)-1; - pending->snap = btrfs_get_fs_root(fs_info, &key, true); + pending->snap = btrfs_get_fs_root(fs_info, objectid, true); if (IS_ERR(pending->snap)) { ret = PTR_ERR(pending->snap); btrfs_abort_transaction(trans, ret); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 31ae8d273065..bf102e64bfb2 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -193,8 +193,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items); struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, - unsigned int num_items, - int min_factor); + unsigned int num_items); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index a92f8a6dd192..517b44300a05 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -957,10 +957,6 @@ static int check_dev_item(struct extent_buffer *leaf, return 0; } -/* Inode item error output has the same format as dir_item_err() */ -#define inode_item_err(eb, slot, fmt, ...) 
\ - dir_item_err(eb, slot, fmt, __VA_ARGS__) - static int check_inode_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 5f9e2dd413af..16c3a6d2586d 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -35,7 +35,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, goto out; } - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) goto out; path = btrfs_alloc_path(); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 02ebdd9edc19..920cee312f4e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -505,13 +505,8 @@ insert: */ if (S_ISREG(btrfs_inode_mode(eb, src_item)) && S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && - ino_size != 0) { - struct btrfs_map_token token; - - btrfs_init_map_token(&token, dst_eb); - btrfs_set_token_inode_size(dst_eb, dst_item, - ino_size, &token); - } + ino_size != 0) + btrfs_set_inode_size(dst_eb, dst_item, ino_size); goto no_copy; } @@ -555,13 +550,9 @@ no_copy: static noinline struct inode *read_one_inode(struct btrfs_root *root, u64 objectid) { - struct btrfs_key key; struct inode *inode; - key.objectid = objectid; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - inode = btrfs_iget(root->fs_info->sb, &key, root); + inode = btrfs_iget(root->fs_info->sb, objectid, root); if (IS_ERR(inode)) inode = NULL; return inode; @@ -3299,6 +3290,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans, clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); + extent_io_tree_release(&log->log_csum_range); btrfs_put_root(log); } @@ -3816,8 +3808,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, found_key.offset = 0; found_key.type = 0; - ret = btrfs_bin_search(path->nodes[0], &found_key, 0, - &start_slot); + ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot); if (ret < 0) break; @@ -3853,44 +3844,41 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, * just to say 'this inode exists' and a logging * to say 'update this inode with these values' */ - btrfs_set_token_inode_generation(leaf, item, 0, &token); - btrfs_set_token_inode_size(leaf, item, logged_isize, &token); + btrfs_set_token_inode_generation(&token, item, 0); + btrfs_set_token_inode_size(&token, item, logged_isize); } else { - btrfs_set_token_inode_generation(leaf, item, - BTRFS_I(inode)->generation, - &token); - btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); - } - - btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); - btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); - btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); - btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); - - btrfs_set_token_timespec_sec(leaf, &item->atime, - inode->i_atime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->atime, - inode->i_atime.tv_nsec, &token); - - btrfs_set_token_timespec_sec(leaf, &item->mtime, - inode->i_mtime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->mtime, - inode->i_mtime.tv_nsec, &token); - - btrfs_set_token_timespec_sec(leaf, &item->ctime, - inode->i_ctime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, &item->ctime, - inode->i_ctime.tv_nsec, &token); - - btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), - &token); - - btrfs_set_token_inode_sequence(leaf, item, - inode_peek_iversion(inode), &token); - 
btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); - btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); - btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); - btrfs_set_token_inode_block_group(leaf, item, 0, &token); + btrfs_set_token_inode_generation(&token, item, + BTRFS_I(inode)->generation); + btrfs_set_token_inode_size(&token, item, inode->i_size); + } + + btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); + btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); + btrfs_set_token_inode_mode(&token, item, inode->i_mode); + btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); + + btrfs_set_token_timespec_sec(&token, &item->atime, + inode->i_atime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->atime, + inode->i_atime.tv_nsec); + + btrfs_set_token_timespec_sec(&token, &item->mtime, + inode->i_mtime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->mtime, + inode->i_mtime.tv_nsec); + + btrfs_set_token_timespec_sec(&token, &item->ctime, + inode->i_ctime.tv_sec); + btrfs_set_token_timespec_nsec(&token, &item->ctime, + inode->i_ctime.tv_nsec); + + btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); + + btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); + btrfs_set_token_inode_transid(&token, item, trans->transid); + btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); + btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); + btrfs_set_token_inode_block_group(&token, item, 0); } static int log_inode_item(struct btrfs_trans_handle *trans, @@ -3916,9 +3904,21 @@ static int log_csums(struct btrfs_trans_handle *trans, struct btrfs_root *log_root, struct btrfs_ordered_sum *sums) { + const u64 lock_end = sums->bytenr + sums->len - 1; + struct extent_state *cached_state = NULL; int ret; /* + * Serialize logging for checksums. This is to avoid racing with the + * same checksum being logged by another task that is logging another + * file which happens to refer to the same extent as well. Such races + * can leave checksum items in the log with overlapping ranges. + */ + ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr, + lock_end, &cached_state); + if (ret) + return ret; + /* * Due to extent cloning, we might have logged a csum item that covers a * subrange of a cloned extent, and later we can end up logging a csum * item for a larger subrange of the same extent or the entire range. @@ -3928,10 +3928,13 @@ static int log_csums(struct btrfs_trans_handle *trans, * trim and adjust) any existing csum items in the log for this range. 
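As a concrete illustration of the race the new log_csum_range lock closes (byte offsets invented for the example): task A logs checksums for [1 MiB, 1 MiB + 64 KiB) of a cloned extent while task B, logging another file that shares the same extent, logs [1 MiB, 1 MiB + 128 KiB); without serialization the two btrfs_del_csums() + btrfs_csum_file_blocks() sequences can interleave and leave csum items with overlapping ranges in the log, which then breaks checksum lookups during log replay.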
*/ ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len); - if (ret) - return ret; + if (!ret) + ret = btrfs_csum_file_blocks(trans, log_root, sums); - return btrfs_csum_file_blocks(trans, log_root, sums); + unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end, + &cached_state); + + return ret; } static noinline int copy_items(struct btrfs_trans_handle *trans, @@ -4164,43 +4167,35 @@ static int log_one_extent(struct btrfs_trans_handle *trans, fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); - btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, - &token); + btrfs_set_token_file_extent_generation(&token, fi, trans->transid); if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) - btrfs_set_token_file_extent_type(leaf, fi, - BTRFS_FILE_EXTENT_PREALLOC, - &token); + btrfs_set_token_file_extent_type(&token, fi, + BTRFS_FILE_EXTENT_PREALLOC); else - btrfs_set_token_file_extent_type(leaf, fi, - BTRFS_FILE_EXTENT_REG, - &token); + btrfs_set_token_file_extent_type(&token, fi, + BTRFS_FILE_EXTENT_REG); block_len = max(em->block_len, em->orig_block_len); if (em->compress_type != BTRFS_COMPRESS_NONE) { - btrfs_set_token_file_extent_disk_bytenr(leaf, fi, - em->block_start, - &token); - btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, - &token); + btrfs_set_token_file_extent_disk_bytenr(&token, fi, + em->block_start); + btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { - btrfs_set_token_file_extent_disk_bytenr(leaf, fi, + btrfs_set_token_file_extent_disk_bytenr(&token, fi, em->block_start - - extent_offset, &token); - btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, - &token); + extent_offset); + btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); } else { - btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); - btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, - &token); - } - - btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); - btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); - btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); - btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, - &token); - btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); - btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); + btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0); + btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0); + } + + btrfs_set_token_file_extent_offset(&token, fi, extent_offset); + btrfs_set_token_file_extent_num_bytes(&token, fi, em->len); + btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes); + btrfs_set_token_file_extent_compression(&token, fi, em->compress_type); + btrfs_set_token_file_extent_encryption(&token, fi, 0); + btrfs_set_token_file_extent_other_encoding(&token, fi, 0); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -4336,12 +4331,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, } } } - if (ins_nr > 0) { + if (ins_nr > 0) ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); - if (ret > 0) - ret = 0; - } out: btrfs_release_path(path); btrfs_free_path(dst_path); @@ -4835,10 +4827,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, btrfs_release_path(path); - key.objectid = ino; - key.type = BTRFS_INODE_ITEM_KEY; - key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root); + inode = btrfs_iget(fs_info->sb, 
ino, root); /* * If the other inode that had a conflicting dir entry was * deleted in the current transaction, we need to log its parent @@ -4847,8 +4836,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, if (IS_ERR(inode)) { ret = PTR_ERR(inode); if (ret == -ENOENT) { - key.objectid = parent; - inode = btrfs_iget(fs_info->sb, &key, root); + inode = btrfs_iget(fs_info->sb, parent, root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); } else { @@ -5587,7 +5575,7 @@ process_leaf: continue; btrfs_release_path(path); - di_inode = btrfs_iget(fs_info->sb, &di_key, root); + di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root); if (IS_ERR(di_inode)) { ret = PTR_ERR(di_inode); goto next_dir_inode; @@ -5713,7 +5701,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, cur_offset = item_size; } - dir_inode = btrfs_iget(fs_info->sb, &inode_key, root); + dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid, + root); /* * If the parent inode was deleted, return an error to * fallback to a transaction commit. This is to prevent @@ -5780,14 +5769,17 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, int slot = path->slots[0]; struct btrfs_key search_key; struct inode *inode; + u64 ino; int ret = 0; btrfs_release_path(path); + ino = found_key.offset; + search_key.objectid = found_key.offset; search_key.type = BTRFS_INODE_ITEM_KEY; search_key.offset = 0; - inode = btrfs_iget(fs_info->sb, &search_key, root); + inode = btrfs_iget(fs_info->sb, ino, root); if (IS_ERR(inode)) return PTR_ERR(inode); @@ -6132,7 +6124,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) struct btrfs_trans_handle *trans; struct btrfs_key key; struct btrfs_key found_key; - struct btrfs_key tmp_key; struct btrfs_root *log; struct btrfs_fs_info *fs_info = log_root_tree->fs_info; struct walk_control wc = { @@ -6194,11 +6185,8 @@ again: goto error; } - tmp_key.objectid = found_key.offset; - tmp_key.type = BTRFS_ROOT_ITEM_KEY; - tmp_key.offset = (u64)-1; - - wc.replay_dest = btrfs_get_fs_root(fs_info, &tmp_key, true); + wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset, + true); if (IS_ERR(wc.replay_dest)) { ret = PTR_ERR(wc.replay_dest); diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 76671a6bcb61..28525ad7ff8c 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -257,7 +257,6 @@ out: static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, u8 *uuid, u8 type, u64 subvolid) { - struct btrfs_key key; int ret = 0; struct btrfs_root *subvol_root; @@ -265,10 +264,7 @@ static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) goto out; - key.objectid = subvolid; - key.type = BTRFS_ROOT_ITEM_KEY; - key.offset = (u64)-1; - subvol_root = btrfs_get_fs_root(fs_info, &key, true); + subvol_root = btrfs_get_fs_root(fs_info, subvolid, true); if (IS_ERR(subvol_root)) { ret = PTR_ERR(subvol_root); if (ret == -ENOENT) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c1909e5f4506..0d6e785bcb98 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -280,10 +280,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * ============ * * uuid_mutex - * volume_mutex - * device_list_mutex - * chunk_mutex - * balance_mutex + * device_list_mutex + * chunk_mutex + * balance_mutex * * * Exclusive operations, BTRFS_FS_EXCL_OP @@ -1042,6 +1041,8 @@ again: &device->dev_state)) { if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state) && + 
!test_bit(BTRFS_DEV_STATE_MISSING, + &device->dev_state) && (!latest_dev || device->generation > latest_dev->generation)) { latest_dev = device; @@ -1185,7 +1186,6 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices, { struct btrfs_device *device; struct btrfs_device *latest_dev = NULL; - int ret = 0; flags |= FMODE_EXCL; @@ -1198,16 +1198,15 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices, device->generation > latest_dev->generation) latest_dev = device; } - if (fs_devices->open_devices == 0) { - ret = -EINVAL; - goto out; - } + if (fs_devices->open_devices == 0) + return -EINVAL; + fs_devices->opened = 1; fs_devices->latest_bdev = latest_dev->bdev; fs_devices->total_rw_bytes = 0; fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; -out: - return ret; + + return 0; } static int devid_cmp(void *priv, struct list_head *a, struct list_head *b) @@ -1251,49 +1250,48 @@ void btrfs_release_disk_super(struct btrfs_super_block *super) put_page(page); } -static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr, - struct page **page, - struct btrfs_super_block **disk_super) +static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, + u64 bytenr) { + struct btrfs_super_block *disk_super; + struct page *page; void *p; pgoff_t index; /* make sure our super fits in the device */ if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) - return 1; + return ERR_PTR(-EINVAL); /* make sure our super fits in the page */ - if (sizeof(**disk_super) > PAGE_SIZE) - return 1; + if (sizeof(*disk_super) > PAGE_SIZE) + return ERR_PTR(-EINVAL); /* make sure our super doesn't straddle pages on disk */ index = bytenr >> PAGE_SHIFT; - if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index) - return 1; + if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) + return ERR_PTR(-EINVAL); /* pull in the page with our super */ - *page = read_cache_page_gfp(bdev->bd_inode->i_mapping, - index, GFP_KERNEL); + page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); - if (IS_ERR(*page)) - return 1; + if (IS_ERR(page)) + return ERR_CAST(page); - p = page_address(*page); + p = page_address(page); /* align our pointer to the offset of the super block */ - *disk_super = p + offset_in_page(bytenr); + disk_super = p + offset_in_page(bytenr); - if (btrfs_super_bytenr(*disk_super) != bytenr || - btrfs_super_magic(*disk_super) != BTRFS_MAGIC) { + if (btrfs_super_bytenr(disk_super) != bytenr || + btrfs_super_magic(disk_super) != BTRFS_MAGIC) { btrfs_release_disk_super(p); - return 1; + return ERR_PTR(-EINVAL); } - if ((*disk_super)->label[0] && - (*disk_super)->label[BTRFS_LABEL_SIZE - 1]) - (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0'; + if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) + disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; - return 0; + return disk_super; } int btrfs_forget_devices(const char *path) @@ -1319,7 +1317,6 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, bool new_device_added = false; struct btrfs_device *device = NULL; struct block_device *bdev; - struct page *page; u64 bytenr; lockdep_assert_held(&uuid_mutex); @@ -1337,8 +1334,9 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, if (IS_ERR(bdev)) return ERR_CAST(bdev); - if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) { - device = ERR_PTR(-EINVAL); + disk_super = btrfs_read_disk_super(bdev, bytenr); + if (IS_ERR(disk_super)) { + device = ERR_CAST(disk_super); goto 
error_bdev_put; } @@ -2663,8 +2661,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path ret = btrfs_commit_transaction(trans); } - /* Update ctime/mtime for libblkid */ + /* + * Now that we have written a new super block to this device, check all + * other fs_devices list if device_path alienates any other scanned + * device. + * We can ignore the return value as it typically returns -EINVAL and + * only succeeds if the device was an alien. + */ + btrfs_forget_devices(device_path); + + /* Update ctime/mtime for blkid or udev */ update_dev_time(device_path); + return ret; error_sysfs: diff --git a/fs/buffer.c b/fs/buffer.c index a60f60396cfa..64fe82ec65ff 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -123,14 +123,6 @@ void __wait_on_buffer(struct buffer_head * bh) } EXPORT_SYMBOL(__wait_on_buffer); -static void -__clear_page_buffers(struct page *page) -{ - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); -} - static void buffer_io_error(struct buffer_head *bh, char *msg) { if (!test_bit(BH_Quiet, &bh->b_state)) @@ -906,7 +898,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head) bh = bh->b_this_page; } while (bh); tail->b_this_page = head; - attach_page_buffers(page, head); + attach_page_private(page, head); } static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) @@ -1154,12 +1146,19 @@ EXPORT_SYMBOL(mark_buffer_dirty); void mark_buffer_write_io_error(struct buffer_head *bh) { + struct super_block *sb; + set_buffer_write_io_error(bh); /* FIXME: do we need to set this in both places? */ if (bh->b_page && bh->b_page->mapping) mapping_set_error(bh->b_page->mapping, -EIO); if (bh->b_assoc_map) mapping_set_error(bh->b_assoc_map, -EIO); + rcu_read_lock(); + sb = READ_ONCE(bh->b_bdev->bd_super); + if (sb) + errseq_set(&sb->s_wb_err, -EIO); + rcu_read_unlock(); } EXPORT_SYMBOL(mark_buffer_write_io_error); @@ -1580,7 +1579,7 @@ void create_empty_buffers(struct page *page, bh = bh->b_this_page; } while (bh != head); } - attach_page_buffers(page, head); + attach_page_private(page, head); spin_unlock(&page->mapping->private_lock); } EXPORT_SYMBOL(create_empty_buffers); @@ -2567,7 +2566,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head) bh->b_this_page = head; bh = bh->b_this_page; } while (bh != head); - attach_page_buffers(page, head); + attach_page_private(page, head); spin_unlock(&page->mapping->private_lock); } @@ -3227,7 +3226,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free) bh = next; } while (bh != head); *buffers_to_free = head; - __clear_page_buffers(page); + detach_page_private(page); return 1; failed: return 0; diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig index ae559ed5b3b3..ff9ca55a9ae9 100644 --- a/fs/cachefiles/Kconfig +++ b/fs/cachefiles/Kconfig @@ -8,7 +8,7 @@ config CACHEFILES filesystems - primarily networking filesystems - thus allowing fast local disk to enhance the speed of slower devices. - See Documentation/filesystems/caching/cachefiles.txt for more + See Documentation/filesystems/caching/cachefiles.rst for more information. config CACHEFILES_DEBUG @@ -36,5 +36,5 @@ config CACHEFILES_HISTOGRAM bouncing between CPUs. On the other hand, the histogram may be useful for debugging purposes. Saying 'N' here is recommended. - See Documentation/filesystems/caching/cachefiles.txt for more + See Documentation/filesystems/caching/cachefiles.rst for more information. 
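Referring back to the buffer.c hunks above: attach_page_private() and detach_page_private() are generic page-cache helpers introduced elsewhere in this series (in include/linux/pagemap.h); the sketch below paraphrases their behaviour to show what the open-coded attach_page_buffers()/__clear_page_buffers() pair turned into, and is not copied from the patch.

static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);			/* the private data pins the page */
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);			/* drop the reference taken in attach */

	return data;
}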
diff --git a/fs/char_dev.c b/fs/char_dev.c index c5e6eff5a381..ba0ded7842a7 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -483,6 +483,9 @@ int cdev_add(struct cdev *p, dev_t dev, unsigned count) p->dev = dev; p->count = count; + if (WARN_ON(dev == WHITEOUT_DEV)) + return -EBUSY; + error = kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p); if (error) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fdfd7cf4c720..5fac34f192af 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3936,14 +3936,8 @@ generic_ip_connect(struct TCP_Server_Info *server) socket->sk->sk_rcvbuf = 140 * 1024; } - if (server->tcp_nodelay) { - int val = 1; - rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - if (rc) - cifs_dbg(FYI, "set TCP_NODELAY socket option error %d\n", - rc); - } + if (server->tcp_nodelay) + tcp_sock_set_nodelay(socket->sk); cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n", socket->sk->sk_sndbuf, diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 2ca9b387d216..8277859d12a3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4163,7 +4163,7 @@ cifs_readv_complete(struct work_struct *work) for (i = 0; i < rdata->nr_pages; i++) { struct page *page = rdata->pages[i]; - lru_cache_add_file(page); + lru_cache_add(page); if (rdata->result == 0 || (rdata->result == -EAGAIN && got_bytes)) { @@ -4233,7 +4233,7 @@ readpages_fill_pages(struct TCP_Server_Info *server, * fill them until the writes are flushed. */ zero_user(page, 0, PAGE_SIZE); - lru_cache_add_file(page); + lru_cache_add(page); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); @@ -4243,7 +4243,7 @@ readpages_fill_pages(struct TCP_Server_Info *server, continue; } else { /* no need to hold page hostage */ - lru_cache_add_file(page); + lru_cache_add(page); unlock_page(page); put_page(page); rdata->pages[i] = NULL; @@ -4438,7 +4438,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, /* best to give up if we're out of mem */ list_for_each_entry_safe(page, tpage, &tmplist, lru) { list_del(&page->lru); - lru_cache_add_file(page); + lru_cache_add(page); unlock_page(page); put_page(page); } @@ -4477,7 +4477,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, add_credits_and_wake_if(server, &rdata->credits, 0); for (i = 0; i < rdata->nr_pages; i++) { page = rdata->pages[i]; - lru_cache_add_file(page); + lru_cache_add(page); unlock_page(page); put_page(page); } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5416ff339401..5072bcaf4be1 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -25,6 +25,7 @@ #include <linux/freezer.h> #include <linux/sched/signal.h> #include <linux/wait_bit.h> +#include <linux/fiemap.h> #include <asm/div64.h> #include "cifsfs.h" diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index e97eb0050a0e..736d86b8a910 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -12,6 +12,7 @@ #include <linux/uuid.h> #include <linux/sort.h> #include <crypto/aead.h> +#include <linux/fiemap.h> #include "cifsfs.h" #include "cifsglob.h" #include "smb2pdu.h" @@ -3449,8 +3450,9 @@ static int smb3_fiemap(struct cifs_tcon *tcon, int i, num, rc, flags, last_blob; u64 next; - if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC)) - return -EBADR; + rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0); + if (rc) + return rc; xid = get_xid(); again: diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 100d04af62b1..d11e31064679 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -325,7 
+325,6 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; struct msghdr smb_msg; - int val = 1; __be32 rfc1002_marker; if (cifs_rdma_enabled(server)) { @@ -345,8 +344,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, } /* cork the socket */ - kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK, - (char *)&val, sizeof(val)); + tcp_sock_set_cork(ssocket->sk, true); for (j = 0; j < num_rqst; j++) send_length += smb_rqst_len(server, &rqst[j]); @@ -435,9 +433,7 @@ unmask: } /* uncork it */ - val = 0; - kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK, - (char *)&val, sizeof(val)); + tcp_sock_set_cork(ssocket->sk, false); if ((total_len > 0) && (total_len != send_length)) { cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n", diff --git a/fs/coda/Kconfig b/fs/coda/Kconfig index ae6759f9594a..c3477eeafb3f 100644 --- a/fs/coda/Kconfig +++ b/fs/coda/Kconfig @@ -15,7 +15,7 @@ config CODA_FS *client*. You will need user level code as well, both for the client and server. Servers are currently user level, i.e. they need no kernel support. Please read - <file:Documentation/filesystems/coda.txt> and check out the Coda + <file:Documentation/filesystems/coda.rst> and check out the Coda home page <http://www.coda.cs.cmu.edu/>. To compile the coda client support as a module, choose M here: the diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c index aaad4ca1217e..2d24c765cbd7 100644 --- a/fs/compat_binfmt_elf.c +++ b/fs/compat_binfmt_elf.c @@ -17,6 +17,8 @@ #include <linux/elfcore-compat.h> #include <linux/time.h> +#define ELF_COMPAT 1 + /* * Rename the basic ELF layout types to refer to the 32-bit class of files. */ @@ -28,18 +30,20 @@ #undef elf_shdr #undef elf_note #undef elf_addr_t +#undef ELF_GNU_PROPERTY_ALIGN #define elfhdr elf32_hdr #define elf_phdr elf32_phdr #define elf_shdr elf32_shdr #define elf_note elf32_note #define elf_addr_t Elf32_Addr +#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN /* * Some data types as stored in coredump. */ #define user_long_t compat_long_t #define user_siginfo_t compat_siginfo_t -#define copy_siginfo_to_user copy_siginfo_to_user32 +#define copy_siginfo_to_external copy_siginfo_to_external32 /* * The machine-dependent core note format types are defined in elfcore-compat.h, @@ -113,6 +117,11 @@ #define arch_setup_additional_pages compat_arch_setup_additional_pages #endif +#ifdef compat_elf_read_implies_exec +#undef elf_read_implies_exec +#define elf_read_implies_exec compat_elf_read_implies_exec +#endif + /* * Rename a few of the symbols that binfmt_elf.c will define. * These are all local so the names don't really matter, but it diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index fd0b5dd68f9e..8bd6a883c94c 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -9,7 +9,7 @@ * * configfs Copyright (C) 2005 Oracle. All rights reserved. * - * Please see Documentation/filesystems/configfs/configfs.txt for more + * Please see Documentation/filesystems/configfs.rst for more * information. */ diff --git a/fs/configfs/item.c b/fs/configfs/item.c index 6e0f1fcb8a5b..704a4356f137 100644 --- a/fs/configfs/item.c +++ b/fs/configfs/item.c @@ -9,7 +9,7 @@ * * configfs Copyright (C) 2005 Oracle. All rights reserved. * - * Please see the file Documentation/filesystems/configfs/configfs.txt for + * Please see the file Documentation/filesystems/configfs.rst for * critical information about using the config_item interface. 
*/ diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig index c8bebb70a971..d98cef0dbb6b 100644 --- a/fs/cramfs/Kconfig +++ b/fs/cramfs/Kconfig @@ -9,7 +9,7 @@ config CRAMFS limited to 256MB file systems (with 16MB files), and doesn't support 16/32 bits uid/gid, hard links and timestamps. - See <file:Documentation/filesystems/cramfs.txt> and + See <file:Documentation/filesystems/cramfs.rst> and <file:fs/cramfs/README> for further information. To compile this as a module, choose M here: the module will be called diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 1ecaac7ee3cb..ed015cb66c7c 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -54,6 +54,7 @@ struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags) /** * fscrypt_free_bounce_page() - free a ciphertext bounce page + * @bounce_page: the bounce page to free, or NULL * * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(), * or by fscrypt_alloc_bounce_page() directly. @@ -76,8 +77,12 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, memset(iv, 0, ci->ci_mode->ivsize); if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { - WARN_ON_ONCE((u32)lblk_num != lblk_num); + WARN_ON_ONCE(lblk_num > U32_MAX); + WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX); lblk_num |= (u64)ci->ci_inode->i_ino << 32; + } else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { + WARN_ON_ONCE(lblk_num > U32_MAX); + lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); } @@ -132,7 +137,8 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, } /** - * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page + * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a + * pagecache page * @page: The locked pagecache page containing the block(s) to encrypt * @len: Total size of the block(s) to encrypt. Must be a nonzero * multiple of the filesystem's block size. @@ -222,7 +228,8 @@ int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); /** - * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page + * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a + * pagecache page * @page: The locked pagecache page containing the block(s) to decrypt * @len: Total size of the block(s) to decrypt. Must be a nonzero * multiple of the filesystem's block size. @@ -346,6 +353,8 @@ void fscrypt_msg(const struct inode *inode, const char *level, /** * fscrypt_init() - Set up for fs encryption. 
+ * + * Return: 0 on success; -errno on failure */ static int __init fscrypt_init(void) { diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 4c212442a8f7..83ca5f1e7934 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -18,7 +18,7 @@ #include <crypto/skcipher.h> #include "fscrypt_private.h" -/** +/* * struct fscrypt_nokey_name - identifier for directory entry when key is absent * * When userspace lists an encrypted directory without access to the key, the @@ -83,13 +83,8 @@ static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) tfm = prev_tfm; } } - { - SHASH_DESC_ON_STACK(desc, tfm); - desc->tfm = tfm; - - return crypto_shash_digest(desc, data, data_len, result); - } + return crypto_shash_tfm_digest(tfm, data, data_len, result); } static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) @@ -105,9 +100,12 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) /** * fscrypt_fname_encrypt() - encrypt a filename - * - * The output buffer must be at least as large as the input buffer. - * Any extra space is filled with NUL padding before encryption. + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @iname: the filename to encrypt + * @out: (output) the encrypted filename + * @olen: size of the encrypted filename. It must be at least @iname->len. + * Any extra space is filled with NUL padding before encryption. * * Return: 0 on success, -errno on failure */ @@ -157,8 +155,11 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, /** * fname_decrypt() - decrypt a filename - * - * The caller must have allocated sufficient memory for the @oname string. + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @iname: the encrypted filename to decrypt + * @oname: (output) the decrypted filename. The caller must have allocated + * enough space for this, e.g. using fscrypt_fname_alloc_buffer(). * * Return: 0 on success, -errno on failure */ @@ -206,7 +207,10 @@ static const char lookup_table[65] = #define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3) /** - * base64_encode() - + * base64_encode() - base64-encode some bytes + * @src: the bytes to encode + * @len: number of bytes to encode + * @dst: (output) the base64-encoded string. Not NUL-terminated. * * Encodes the input string using characters from the set [A-Za-z0-9+,]. * The encoded string is roughly 4/3 times the size of the input string. @@ -272,7 +276,12 @@ bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, } /** - * fscrypt_fname_alloc_buffer - allocate a buffer for presented filenames + * fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @max_encrypted_len: maximum length of encrypted filenames the buffer will be + * used to present + * @crypto_str: (output) buffer to allocate * * Allocate a buffer that is large enough to hold any decrypted or encoded * filename (null-terminated), for the given maximum encrypted filename length. 
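The fscrypt_do_sha256() change above (and matching hunks in fs/crypto/hkdf.c and fs/ecryptfs/crypto.c later in this diff) replaces the SHASH_DESC_ON_STACK() boilerplate with the crypto_shash_tfm_digest() helper, which handles the request state internally for one-shot digests. A rough before/after sketch, assuming a pre-allocated hash tfm (the sha256_of_* wrappers are illustrative):

#include <crypto/hash.h>

/* Old style: build a shash_desc on the stack for a one-shot digest. */
static int sha256_of_old(struct crypto_shash *tfm,
                         const u8 *data, unsigned int len, u8 *out)
{
        SHASH_DESC_ON_STACK(desc, tfm);
        int err;

        desc->tfm = tfm;
        err = crypto_shash_digest(desc, data, len, out);
        shash_desc_zero(desc);
        return err;
}

/* New style: one call, no on-stack descriptor to set up and wipe. */
static int sha256_of_new(struct crypto_shash *tfm,
                         const u8 *data, unsigned int len, u8 *out)
{
        return crypto_shash_tfm_digest(tfm, data, len, out);
}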
@@ -297,9 +306,10 @@ int fscrypt_fname_alloc_buffer(const struct inode *inode, EXPORT_SYMBOL(fscrypt_fname_alloc_buffer); /** - * fscrypt_fname_free_buffer - free the buffer for presented filenames + * fscrypt_fname_free_buffer() - free a buffer for presented filenames + * @crypto_str: the buffer to free * - * Free the buffer allocated by fscrypt_fname_alloc_buffer(). + * Free a buffer that was allocated by fscrypt_fname_alloc_buffer(). */ void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) { @@ -311,10 +321,19 @@ void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) EXPORT_SYMBOL(fscrypt_fname_free_buffer); /** - * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user - * space - * - * The caller must have allocated sufficient memory for the @oname string. + * fscrypt_fname_disk_to_usr() - convert an encrypted filename to + * user-presentable form + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @hash: first part of the name's dirhash, if applicable. This only needs to + * be provided if the filename is located in an indexed directory whose + * encryption key may be unavailable. Not needed for symlink targets. + * @minor_hash: second part of the name's dirhash, if applicable + * @iname: encrypted filename to convert. May also be "." or "..", which + * aren't actually encrypted. + * @oname: output buffer for the user-presentable filename. The caller must + * have allocated enough space for this, e.g. using + * fscrypt_fname_alloc_buffer(). * * If the key is available, we'll decrypt the disk name. Otherwise, we'll * encode it for presentation in fscrypt_nokey_name format. diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index dbced2937ec8..eb7fcd2b7fb8 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -43,7 +43,7 @@ struct fscrypt_context_v2 { u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; -/** +/* * fscrypt_context - the encryption context of an inode * * This is the on-disk equivalent of an fscrypt_policy, stored alongside each @@ -157,7 +157,7 @@ fscrypt_policy_flags(const union fscrypt_policy *policy) BUG(); } -/** +/* * For encrypted symlinks, the ciphertext length is stored at the beginning * of the string in little-endian format. */ @@ -222,6 +222,9 @@ struct fscrypt_info { /* This inode's nonce, copied from the fscrypt_context */ u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + + /* Hashed inode number. Only set for IV_INO_LBLK_32 */ + u32 ci_hashed_ino; }; typedef enum { @@ -231,15 +234,14 @@ typedef enum { /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; -extern int fscrypt_initialize(unsigned int cop_flags); -extern int fscrypt_crypt_block(const struct inode *inode, - fscrypt_direction_t rw, u64 lblk_num, - struct page *src_page, struct page *dest_page, - unsigned int len, unsigned int offs, - gfp_t gfp_flags); -extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); - -extern void __printf(3, 4) __cold +int fscrypt_initialize(unsigned int cop_flags); +int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, + u64 lblk_num, struct page *src_page, + struct page *dest_page, unsigned int len, + unsigned int offs, gfp_t gfp_flags); +struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); + +void __printf(3, 4) __cold fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...); #define fscrypt_warn(inode, fmt, ...) 
\ @@ -264,12 +266,10 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci); /* fname.c */ -extern int fscrypt_fname_encrypt(const struct inode *inode, - const struct qstr *iname, - u8 *out, unsigned int olen); -extern bool fscrypt_fname_encrypted_size(const struct inode *inode, - u32 orig_len, u32 max_len, - u32 *encrypted_len_ret); +int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen); +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret); extern const struct dentry_operations fscrypt_d_ops; /* hkdf.c */ @@ -278,8 +278,8 @@ struct fscrypt_hkdf { struct crypto_shash *hmac_tfm; }; -extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, - unsigned int master_key_size); +int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, + unsigned int master_key_size); /* * The list of contexts in which fscrypt uses HKDF. These values are used as @@ -293,12 +293,14 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, #define HKDF_CONTEXT_DIRECT_KEY 3 #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 #define HKDF_CONTEXT_DIRHASH_KEY 5 +#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 +#define HKDF_CONTEXT_INODE_HASH_KEY 7 -extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, - const u8 *info, unsigned int infolen, - u8 *okm, unsigned int okmlen); +int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen); -extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); +void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); /* keyring.c */ @@ -389,14 +391,17 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; - /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */ - struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1]; - /* - * Crypto API transforms for filesystem-layer implementation of - * IV_INO_LBLK_64 policies, allocated on-demand. + * Per-mode encryption keys for the various types of encryption policies + * that use them. Allocated and derived on-demand. */ - struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1]; + struct crypto_skcipher *mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; + struct crypto_skcipher *mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; + struct crypto_skcipher *mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; + + /* Hash key for inode numbers. Initialized only when needed. 
*/ + siphash_key_t mk_ino_hash_key; + bool mk_ino_hash_key_initialized; } __randomize_layout; @@ -436,14 +441,17 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) return 0; } -extern struct key * +struct key * fscrypt_find_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec); -extern int fscrypt_verify_key_added(struct super_block *sb, - const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]); +int fscrypt_add_test_dummy_key(struct super_block *sb, + struct fscrypt_key_specifier *key_spec); + +int fscrypt_verify_key_added(struct super_block *sb, + const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]); -extern int __init fscrypt_init_keyring(void); +int __init fscrypt_init_keyring(void); /* keysetup.c */ @@ -457,33 +465,32 @@ struct fscrypt_mode { extern struct fscrypt_mode fscrypt_modes[]; -extern struct crypto_skcipher * -fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, - const struct inode *inode); +struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, + const u8 *raw_key, + const struct inode *inode); -extern int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, - const u8 *raw_key); +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key); -extern int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, - const struct fscrypt_master_key *mk); +int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk); /* keysetup_v1.c */ -extern void fscrypt_put_direct_key(struct fscrypt_direct_key *dk); +void fscrypt_put_direct_key(struct fscrypt_direct_key *dk); + +int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, + const u8 *raw_master_key); -extern int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, - const u8 *raw_master_key); +int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci); -extern int fscrypt_setup_v1_file_key_via_subscribed_keyrings( - struct fscrypt_info *ci); /* policy.c */ -extern bool fscrypt_policies_equal(const union fscrypt_policy *policy1, - const union fscrypt_policy *policy2); -extern bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, - const struct inode *inode); -extern int fscrypt_policy_from_context(union fscrypt_policy *policy_u, - const union fscrypt_context *ctx_u, - int ctx_size); +bool fscrypt_policies_equal(const union fscrypt_policy *policy1, + const union fscrypt_policy *policy2); +bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, + const struct inode *inode); +int fscrypt_policy_from_context(union fscrypt_policy *policy_u, + const union fscrypt_context *ctx_u, + int ctx_size); #endif /* _FSCRYPT_PRIVATE_H */ diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c index efb95bd19a89..0cba7928446d 100644 --- a/fs/crypto/hkdf.c +++ b/fs/crypto/hkdf.c @@ -44,17 +44,13 @@ static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm, unsigned int ikmlen, u8 prk[HKDF_HASHLEN]) { static const u8 default_salt[HKDF_HASHLEN]; - SHASH_DESC_ON_STACK(desc, hmac_tfm); int err; err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN); if (err) return err; - desc->tfm = hmac_tfm; - err = crypto_shash_digest(desc, ikm, ikmlen, prk); - shash_desc_zero(desc); - return err; + return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk); } /* diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index 5ef861742921..09fb8aa0f2e9 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -10,7 +10,7 @@ #include "fscrypt_private.h" /** - * fscrypt_file_open - prepare 
to open a possibly-encrypted regular file + * fscrypt_file_open() - prepare to open a possibly-encrypted regular file * @inode: the inode being opened * @filp: the struct file being set up * @@ -262,7 +262,7 @@ err_free_sd: EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink); /** - * fscrypt_get_symlink - get the target of an encrypted symlink + * fscrypt_get_symlink() - get the target of an encrypted symlink * @inode: the symlink inode * @caddr: the on-disk contents of the symlink * @max_size: size of @caddr buffer diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index ab41b25d4fa1..e24eb48bfbe1 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -20,6 +20,7 @@ #include <crypto/skcipher.h> #include <linux/key-type.h> +#include <linux/random.h> #include <linux/seq_file.h> #include "fscrypt_private.h" @@ -44,8 +45,9 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - crypto_free_skcipher(mk->mk_direct_tfms[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]); + crypto_free_skcipher(mk->mk_direct_keys[i]); + crypto_free_skcipher(mk->mk_iv_ino_lblk_64_keys[i]); + crypto_free_skcipher(mk->mk_iv_ino_lblk_32_keys[i]); } key_put(mk->mk_users); @@ -424,9 +426,9 @@ static int add_existing_master_key(struct fscrypt_master_key *mk, return 0; } -static int add_master_key(struct super_block *sb, - struct fscrypt_master_key_secret *secret, - const struct fscrypt_key_specifier *mk_spec) +static int do_add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); struct key *key; @@ -465,6 +467,35 @@ out_unlock: return err; } +static int add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + struct fscrypt_key_specifier *key_spec) +{ + int err; + + if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + err = fscrypt_init_hkdf(&secret->hkdf, secret->raw, + secret->size); + if (err) + return err; + + /* + * Now that the HKDF context is initialized, the raw key is no + * longer needed. + */ + memzero_explicit(secret->raw, secret->size); + + /* Calculate the key identifier */ + err = fscrypt_hkdf_expand(&secret->hkdf, + HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0, + key_spec->u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + if (err) + return err; + } + return do_add_master_key(sb, secret, key_spec); +} + static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; @@ -609,6 +640,15 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; + /* + * Only root can add keys that are identified by an arbitrary descriptor + * rather than by a cryptographic hash --- since otherwise a malicious + * user could add the wrong key. + */ + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + !capable(CAP_SYS_ADMIN)) + return -EACCES; + memset(&secret, 0, sizeof(secret)); if (arg.key_id) { if (arg.raw_size != 0) @@ -626,48 +666,17 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) goto out_wipe_secret; } - switch (arg.key_spec.type) { - case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: - /* - * Only root can add keys that are identified by an arbitrary - * descriptor rather than by a cryptographic hash --- since - * otherwise a malicious user could add the wrong key. 
- */ - err = -EACCES; - if (!capable(CAP_SYS_ADMIN)) - goto out_wipe_secret; - break; - case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: - err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); - if (err) - goto out_wipe_secret; - - /* - * Now that the HKDF context is initialized, the raw key is no - * longer needed. - */ - memzero_explicit(secret.raw, secret.size); - - /* Calculate the key identifier and return it to userspace. */ - err = fscrypt_hkdf_expand(&secret.hkdf, - HKDF_CONTEXT_KEY_IDENTIFIER, - NULL, 0, arg.key_spec.u.identifier, - FSCRYPT_KEY_IDENTIFIER_SIZE); - if (err) - goto out_wipe_secret; - err = -EFAULT; - if (copy_to_user(uarg->key_spec.u.identifier, - arg.key_spec.u.identifier, - FSCRYPT_KEY_IDENTIFIER_SIZE)) - goto out_wipe_secret; - break; - default: - WARN_ON(1); - err = -EINVAL; + err = add_master_key(sb, &secret, &arg.key_spec); + if (err) goto out_wipe_secret; - } - err = add_master_key(sb, &secret, &arg.key_spec); + /* Return the key identifier to userspace, if applicable */ + err = -EFAULT; + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && + copy_to_user(uarg->key_spec.u.identifier, arg.key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE)) + goto out_wipe_secret; + err = 0; out_wipe_secret: wipe_master_key_secret(&secret); return err; @@ -675,6 +684,29 @@ out_wipe_secret: EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); /* + * Add the key for '-o test_dummy_encryption' to the filesystem keyring. + * + * Use a per-boot random key to prevent people from misusing this option. + */ +int fscrypt_add_test_dummy_key(struct super_block *sb, + struct fscrypt_key_specifier *key_spec) +{ + static u8 test_key[FSCRYPT_MAX_KEY_SIZE]; + struct fscrypt_master_key_secret secret; + int err; + + get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE); + + memset(&secret, 0, sizeof(secret)); + secret.size = FSCRYPT_MAX_KEY_SIZE; + memcpy(secret.raw, test_key, FSCRYPT_MAX_KEY_SIZE); + + err = add_master_key(sb, &secret, key_spec); + wipe_master_key_secret(&secret); + return err; +} + +/* * Verify that the current user has added a master key with the given identifier * (returns -ENOKEY if not). This is needed to prevent a user from encrypting * their files using some other user's key which they don't actually know. 
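The fscrypt_add_test_dummy_key() helper above relies on get_random_once() so that the dummy master key is random per boot but stable across all mounts within that boot. A small sketch of the once-per-boot pattern in isolation (buffer name and size are illustrative, not the fscrypt values):

#include <linux/types.h>
#include <linux/random.h>

#define DUMMY_KEY_SIZE 64

static u8 dummy_key[DUMMY_KEY_SIZE];

static const u8 *get_dummy_key(void)
{
        /*
         * Filled with random bytes on first use only; every later caller
         * sees the same per-boot value.
         */
        get_random_once(dummy_key, sizeof(dummy_key));
        return dummy_key;
}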
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 302375e9f719..1129adfa097d 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -46,6 +46,8 @@ struct fscrypt_mode fscrypt_modes[] = { }, }; +static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex); + static struct fscrypt_mode * select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) @@ -130,7 +132,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; const u8 mode_num = mode - fscrypt_modes; - struct crypto_skcipher *tfm, *prev_tfm; + struct crypto_skcipher *tfm; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; @@ -139,10 +141,17 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; - /* pairs with cmpxchg() below */ + /* pairs with smp_store_release() below */ tfm = READ_ONCE(tfms[mode_num]); - if (likely(tfm != NULL)) - goto done; + if (likely(tfm != NULL)) { + ci->ci_ctfm = tfm; + return 0; + } + + mutex_lock(&fscrypt_mode_key_setup_mutex); + + if (tfms[mode_num]) + goto done_unlock; BUILD_BUG_ON(sizeof(mode_num) != 1); BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); @@ -157,21 +166,21 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, hkdf_context, hkdf_info, hkdf_infolen, mode_key, mode->keysize); if (err) - return err; + goto out_unlock; tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); memzero_explicit(mode_key, mode->keysize); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - /* pairs with READ_ONCE() above */ - prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm); - if (prev_tfm != NULL) { - crypto_free_skcipher(tfm); - tfm = prev_tfm; + if (IS_ERR(tfm)) { + err = PTR_ERR(tfm); + goto out_unlock; } -done: + /* pairs with READ_ONCE() above */ + smp_store_release(&tfms[mode_num], tfm); +done_unlock: ci->ci_ctfm = tfm; - return 0; + err = 0; +out_unlock: + mutex_unlock(&fscrypt_mode_key_setup_mutex); + return err; } int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, @@ -189,6 +198,43 @@ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, return 0; } +static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk) +{ + int err; + + err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_32_keys, + HKDF_CONTEXT_IV_INO_LBLK_32_KEY, true); + if (err) + return err; + + /* pairs with smp_store_release() below */ + if (!smp_load_acquire(&mk->mk_ino_hash_key_initialized)) { + + mutex_lock(&fscrypt_mode_key_setup_mutex); + + if (mk->mk_ino_hash_key_initialized) + goto unlock; + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0, + (u8 *)&mk->mk_ino_hash_key, + sizeof(mk->mk_ino_hash_key)); + if (err) + goto unlock; + /* pairs with smp_load_acquire() above */ + smp_store_release(&mk->mk_ino_hash_key_initialized, true); +unlock: + mutex_unlock(&fscrypt_mode_key_setup_mutex); + if (err) + return err; + } + + ci->ci_hashed_ino = (u32)siphash_1u64(ci->ci_inode->i_ino, + &mk->mk_ino_hash_key); + return 0; +} + static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk) { @@ -203,7 +249,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * encryption key. This ensures that the master key is * consistently used only for HKDF, avoiding key reuse issues. 
*/ - err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_tfms, + err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_keys, HKDF_CONTEXT_DIRECT_KEY, false); } else if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { @@ -211,11 +257,14 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * IV_INO_LBLK_64: encryption keys are derived from (master_key, * mode_num, filesystem_uuid), and inode number is included in * the IVs. This format is optimized for use with inline - * encryption hardware compliant with the UFS or eMMC standards. + * encryption hardware compliant with the UFS standard. */ - err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms, + err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, HKDF_CONTEXT_IV_INO_LBLK_64_KEY, true); + } else if (ci->ci_policy.v2.flags & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { + err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk); } else { u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; @@ -395,21 +444,18 @@ int fscrypt_get_encryption_info(struct inode *inode) res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res < 0) { - if (!fscrypt_dummy_context_enabled(inode) || - IS_ENCRYPTED(inode)) { + const union fscrypt_context *dummy_ctx = + fscrypt_get_dummy_context(inode->i_sb); + + if (IS_ENCRYPTED(inode) || !dummy_ctx) { fscrypt_warn(inode, "Error %d getting encryption context", res); return res; } /* Fake up a context for an unencrypted directory */ - memset(&ctx, 0, sizeof(ctx)); - ctx.version = FSCRYPT_CONTEXT_V1; - ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; - ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; - memset(ctx.v1.master_key_descriptor, 0x42, - FSCRYPT_KEY_DESCRIPTOR_SIZE); - res = sizeof(ctx.v1); + res = fscrypt_context_size(dummy_ctx); + memcpy(&ctx, dummy_ctx, res); } crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS); @@ -475,7 +521,8 @@ out: EXPORT_SYMBOL(fscrypt_get_encryption_info); /** - * fscrypt_put_encryption_info - free most of an inode's fscrypt data + * fscrypt_put_encryption_info() - free most of an inode's fscrypt data + * @inode: an inode being evicted * * Free the inode's fscrypt_info. Filesystems must call this when the inode is * being evicted. An RCU grace period need not have elapsed yet. @@ -488,7 +535,8 @@ void fscrypt_put_encryption_info(struct inode *inode) EXPORT_SYMBOL(fscrypt_put_encryption_info); /** - * fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay + * fscrypt_free_inode() - free an inode's fscrypt data requiring RCU delay + * @inode: an inode being freed * * Free the inode's cached decrypted symlink target, if any. Filesystems must * call this after an RCU grace period, just before they free the inode. 
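The setup_per_mode_enc_key() rework in the keysetup.c hunks above drops the cmpxchg() race-to-allocate approach in favour of a mutex plus an acquire/release publish, so a loser of the race no longer allocates and immediately frees a throw-away tfm. A generic sketch of that lazy-initialisation idiom (the object, allocator, and names are placeholders, not fscrypt APIs):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/barrier.h>

struct foo {
        int x;
};

static DEFINE_MUTEX(obj_init_mutex);
static struct foo *global_obj;  /* written once, then read locklessly */

static struct foo *get_global_obj(void)
{
        /* Fast path: pairs with smp_store_release() below. */
        struct foo *obj = smp_load_acquire(&global_obj);

        if (obj)
                return obj;

        mutex_lock(&obj_init_mutex);
        obj = global_obj;
        if (!obj) {
                obj = kzalloc(sizeof(*obj), GFP_KERNEL);
                if (obj)
                        /* Publish only after the object is fully set up. */
                        smp_store_release(&global_obj, obj);
        }
        mutex_unlock(&obj_init_mutex);
        return obj;             /* NULL on allocation failure */
}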
@@ -503,7 +551,8 @@ void fscrypt_free_inode(struct inode *inode) EXPORT_SYMBOL(fscrypt_free_inode); /** - * fscrypt_drop_inode - check whether the inode's master key has been removed + * fscrypt_drop_inode() - check whether the inode's master key has been removed + * @inode: an inode being considered for eviction * * Filesystems supporting fscrypt must call this from their ->drop_inode() * method so that encrypted inodes are evicted as soon as they're no longer in diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 10ccf945020c..d23ff162c78b 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -11,12 +11,15 @@ */ #include <linux/random.h> +#include <linux/seq_file.h> #include <linux/string.h> #include <linux/mount.h> #include "fscrypt_private.h" /** - * fscrypt_policies_equal - check whether two encryption policies are the same + * fscrypt_policies_equal() - check whether two encryption policies are the same + * @policy1: the first policy + * @policy2: the second policy * * Return: %true if equal, else %false */ @@ -66,18 +69,14 @@ static bool supported_direct_key_modes(const struct inode *inode, return true; } -static bool supported_iv_ino_lblk_64_policy( - const struct fscrypt_policy_v2 *policy, - const struct inode *inode) +static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, + const struct inode *inode, + const char *type, + int max_ino_bits, int max_lblk_bits) { struct super_block *sb = inode->i_sb; int ino_bits = 64, lblk_bits = 64; - if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { - fscrypt_warn(inode, - "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive"); - return false; - } /* * It's unsafe to include inode numbers in the IVs if the filesystem can * potentially renumber inodes, e.g. via filesystem shrinking. 
@@ -85,16 +84,22 @@ static bool supported_iv_ino_lblk_64_policy( if (!sb->s_cop->has_stable_inodes || !sb->s_cop->has_stable_inodes(sb)) { fscrypt_warn(inode, - "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers", - sb->s_id); + "Can't use %s policy on filesystem '%s' because it doesn't have stable inode numbers", + type, sb->s_id); return false; } if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); - if (ino_bits > 32 || lblk_bits > 32) { + if (ino_bits > max_ino_bits) { + fscrypt_warn(inode, + "Can't use %s policy on filesystem '%s' because its inode numbers are too long", + type, sb->s_id); + return false; + } + if (lblk_bits > max_lblk_bits) { fscrypt_warn(inode, - "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers", - sb->s_id); + "Can't use %s policy on filesystem '%s' because its block numbers are too long", + type, sb->s_id); return false; } return true; @@ -137,6 +142,8 @@ static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, const struct inode *inode) { + int count = 0; + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, @@ -152,13 +159,29 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, return false; } + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY); + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64); + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32); + if (count > 1) { + fscrypt_warn(inode, "Mutually exclusive encryption flags (0x%02x)", + policy->flags); + return false; + } + if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && !supported_direct_key_modes(inode, policy->contents_encryption_mode, policy->filenames_encryption_mode)) return false; if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && - !supported_iv_ino_lblk_64_policy(policy, inode)) + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64", + 32, 32)) + return false; + + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + /* This uses hashed inode numbers, so ino_bits doesn't matter. */ + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32", + INT_MAX, 32)) return false; if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { @@ -170,7 +193,9 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, } /** - * fscrypt_supported_policy - check whether an encryption policy is supported + * fscrypt_supported_policy() - check whether an encryption policy is supported + * @policy_u: the encryption policy + * @inode: the inode on which the policy will be used * * Given an encryption policy, check whether all its encryption modes and other * settings are supported by this kernel on the given inode. (But we don't @@ -192,7 +217,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, } /** - * fscrypt_new_context_from_policy - create a new fscrypt_context from a policy + * fscrypt_new_context_from_policy() - create a new fscrypt_context from + * an fscrypt_policy + * @ctx_u: output context + * @policy_u: input policy * * Create an fscrypt_context for an inode that is being assigned the given * encryption policy. A new nonce is randomly generated. 
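The fscrypt_supported_v2_policy() hunk above verifies that at most one of the DIRECT_KEY / IV_INO_LBLK_64 / IV_INO_LBLK_32 flags is set by counting them. An equivalent way to express the same "mutually exclusive flags" check, shown here only as an alternative formulation, is to mask the flags and use hweight32(); the flag values below are illustrative, not the real fscrypt definitions:

#include <linux/types.h>
#include <linux/bitops.h>

#define FLAG_A 0x01
#define FLAG_B 0x08
#define FLAG_C 0x10

static bool exclusive_flags_ok(unsigned int flags)
{
        /* At most one of the three mutually exclusive flags may be set. */
        return hweight32(flags & (FLAG_A | FLAG_B | FLAG_C)) <= 1;
}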
@@ -242,7 +270,11 @@ static int fscrypt_new_context_from_policy(union fscrypt_context *ctx_u, } /** - * fscrypt_policy_from_context - convert an fscrypt_context to an fscrypt_policy + * fscrypt_policy_from_context() - convert an fscrypt_context to + * an fscrypt_policy + * @policy_u: output policy + * @ctx_u: input context + * @ctx_size: size of input context in bytes * * Given an fscrypt_context, build the corresponding fscrypt_policy. * @@ -354,6 +386,9 @@ static int set_encryption_policy(struct inode *inode, policy->v2.master_key_identifier); if (err) return err; + if (policy->v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + pr_warn_once("%s (pid %d) is setting an IV_INO_LBLK_32 encryption policy. This should only be used if there are certain hardware limitations.\n", + current->comm, current->pid); break; default: WARN_ON(1); @@ -605,3 +640,127 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, return preload ? fscrypt_get_encryption_info(child): 0; } EXPORT_SYMBOL(fscrypt_inherit_context); + +/** + * fscrypt_set_test_dummy_encryption() - handle '-o test_dummy_encryption' + * @sb: the filesystem on which test_dummy_encryption is being specified + * @arg: the argument to the test_dummy_encryption option. + * If no argument was specified, then @arg->from == NULL. + * @dummy_ctx: the filesystem's current dummy context (input/output, see below) + * + * Handle the test_dummy_encryption mount option by creating a dummy encryption + * context, saving it in @dummy_ctx, and adding the corresponding dummy + * encryption key to the filesystem. If the @dummy_ctx is already set, then + * instead validate that it matches @arg. Don't support changing it via + * remount, as that is difficult to do safely. + * + * The reason we use an fscrypt_context rather than an fscrypt_policy is because + * we mustn't generate a new nonce each time we access a dummy-encrypted + * directory, as that would change the way filenames are encrypted. + * + * Return: 0 on success (dummy context set, or the same context is already set); + * -EEXIST if a different dummy context is already set; + * or another -errno value. + */ +int fscrypt_set_test_dummy_encryption(struct super_block *sb, + const substring_t *arg, + struct fscrypt_dummy_context *dummy_ctx) +{ + const char *argstr = "v2"; + const char *argstr_to_free = NULL; + struct fscrypt_key_specifier key_spec = { 0 }; + int version; + union fscrypt_context *ctx = NULL; + int err; + + if (arg->from) { + argstr = argstr_to_free = match_strdup(arg); + if (!argstr) + return -ENOMEM; + } + + if (!strcmp(argstr, "v1")) { + version = FSCRYPT_CONTEXT_V1; + key_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; + memset(key_spec.u.descriptor, 0x42, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + } else if (!strcmp(argstr, "v2")) { + version = FSCRYPT_CONTEXT_V2; + key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; + /* key_spec.u.identifier gets filled in when adding the key */ + } else { + err = -EINVAL; + goto out; + } + + if (dummy_ctx->ctx) { + /* + * Note: if we ever make test_dummy_encryption support + * specifying other encryption settings, such as the encryption + * modes, we'll need to compare those settings here. 
+ */ + if (dummy_ctx->ctx->version == version) + err = 0; + else + err = -EEXIST; + goto out; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + err = -ENOMEM; + goto out; + } + + err = fscrypt_add_test_dummy_key(sb, &key_spec); + if (err) + goto out; + + ctx->version = version; + switch (ctx->version) { + case FSCRYPT_CONTEXT_V1: + ctx->v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx->v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memcpy(ctx->v1.master_key_descriptor, key_spec.u.descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + break; + case FSCRYPT_CONTEXT_V2: + ctx->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memcpy(ctx->v2.master_key_identifier, key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + break; + default: + WARN_ON(1); + err = -EINVAL; + goto out; + } + dummy_ctx->ctx = ctx; + ctx = NULL; + err = 0; +out: + kfree(ctx); + kfree(argstr_to_free); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_set_test_dummy_encryption); + +/** + * fscrypt_show_test_dummy_encryption() - show '-o test_dummy_encryption' + * @seq: the seq_file to print the option to + * @sep: the separator character to use + * @sb: the filesystem whose options are being shown + * + * Show the test_dummy_encryption mount option, if it was specified. + * This is mainly used for /proc/mounts. + */ +void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, + struct super_block *sb) +{ + const union fscrypt_context *ctx = fscrypt_get_dummy_context(sb); + + if (!ctx) + return; + seq_printf(seq, "%ctest_dummy_encryption=v%d", sep, ctx->version); +} +EXPORT_SYMBOL_GPL(fscrypt_show_test_dummy_encryption); diff --git a/fs/dcache.c b/fs/dcache.c index b280e07e162b..361ea7ab30ea 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -165,7 +165,7 @@ static long get_nr_dentry_negative(void) return sum < 0 ? 0 : sum; } -int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, +int proc_nr_dentry(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { dentry_stat.nr_dentry = get_nr_dentry(); @@ -647,6 +647,10 @@ static inline bool retain_dentry(struct dentry *dentry) if (dentry->d_op->d_delete(dentry)) return false; } + + if (unlikely(dentry->d_flags & DCACHE_DONTCACHE)) + return false; + /* retain; LRU fodder */ dentry->d_lockref.count--; if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) @@ -656,6 +660,21 @@ static inline bool retain_dentry(struct dentry *dentry) return true; } +void d_mark_dontcache(struct inode *inode) +{ + struct dentry *de; + + spin_lock(&inode->i_lock); + hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { + spin_lock(&de->d_lock); + de->d_flags |= DCACHE_DONTCACHE; + spin_unlock(&de->d_lock); + } + inode->i_state |= I_DONTCACHE; + spin_unlock(&inode->i_lock); +} +EXPORT_SYMBOL(d_mark_dontcache); + /* * Finish off a dentry we've decided to kill. * dentry->d_lock must be held, returns with it unlocked. 
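fscrypt_set_test_dummy_encryption() and fscrypt_show_test_dummy_encryption(), added in the fs/crypto/policy.c hunks above, are intended to be called from a filesystem's mount-option parsing and ->show_options methods. A rough sketch of how a filesystem might wire them up (the my_fs_* names and the sbi layout are hypothetical; the function signatures are the ones introduced in this diff):

#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/parser.h>
#include <linux/seq_file.h>

struct my_fs_sb_info {
        struct fscrypt_dummy_context dummy_enc_ctx;    /* hypothetical member */
};

/* Called while parsing "-o test_dummy_encryption[=v1|v2]". */
static int my_fs_parse_test_dummy_encryption(struct super_block *sb,
                                             substring_t *arg)
{
        struct my_fs_sb_info *sbi = sb->s_fs_info;

        return fscrypt_set_test_dummy_encryption(sb, arg,
                                                 &sbi->dummy_enc_ctx);
}

/* Called from ->show_options to echo the option back to /proc/mounts. */
static int my_fs_show_options(struct seq_file *seq, struct dentry *root)
{
        fscrypt_show_test_dummy_encryption(seq, ',', root->d_sb);
        return 0;
}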
diff --git a/fs/direct-io.c b/fs/direct-io.c index 00b4d15bb811..1543b5af400e 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -386,25 +386,6 @@ static void dio_bio_end_io(struct bio *bio) spin_unlock_irqrestore(&dio->bio_lock, flags); } -/** - * dio_end_io - handle the end io action for the given bio - * @bio: The direct io bio thats being completed - * - * This is meant to be called by any filesystem that uses their own dio_submit_t - * so that the DIO specific endio actions are dealt with after the filesystem - * has done it's completion work. - */ -void dio_end_io(struct bio *bio) -{ - struct dio *dio = bio->bi_private; - - if (dio->is_async) - dio_bio_end_aio(bio); - else - dio_bio_end_io(bio); -} -EXPORT_SYMBOL_GPL(dio_end_io); - static inline void dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, struct block_device *bdev, @@ -500,7 +481,7 @@ static struct bio *dio_await_one(struct dio *dio) spin_unlock_irqrestore(&dio->bio_lock, flags); if (!(dio->iocb->ki_flags & IOCB_HIPRI) || !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true)) - io_schedule(); + blk_io_schedule(); /* wake up sets us TASK_RUNNING */ spin_lock_irqsave(&dio->bio_lock, flags); dio->waiter = NULL; diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index cdfaf4f0e11a..3543a8fec907 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -724,7 +724,7 @@ out_close: } /* Listening socket is busy, accept a connection */ -static int tcp_accept_from_sock(struct connection *con) +static int accept_from_sock(struct connection *con) { int result; struct sockaddr_storage peeraddr; @@ -852,123 +852,6 @@ accept_err: return result; } -static int sctp_accept_from_sock(struct connection *con) -{ - /* Check that the new node is in the lockspace */ - struct sctp_prim prim; - int nodeid; - int prim_len, ret; - int addr_len; - struct connection *newcon; - struct connection *addcon; - struct socket *newsock; - - mutex_lock(&connections_lock); - if (!dlm_allow_conn) { - mutex_unlock(&connections_lock); - return -1; - } - mutex_unlock(&connections_lock); - - mutex_lock_nested(&con->sock_mutex, 0); - - ret = kernel_accept(con->sock, &newsock, O_NONBLOCK); - if (ret < 0) - goto accept_err; - - memset(&prim, 0, sizeof(struct sctp_prim)); - prim_len = sizeof(struct sctp_prim); - - ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, - (char *)&prim, &prim_len); - if (ret < 0) { - log_print("getsockopt/sctp_primary_addr failed: %d", ret); - goto accept_err; - } - - make_sockaddr(&prim.ssp_addr, 0, &addr_len); - ret = addr_to_nodeid(&prim.ssp_addr, &nodeid); - if (ret) { - unsigned char *b = (unsigned char *)&prim.ssp_addr; - - log_print("reject connect from unknown addr"); - print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, - b, sizeof(struct sockaddr_storage)); - goto accept_err; - } - - newcon = nodeid2con(nodeid, GFP_NOFS); - if (!newcon) { - ret = -ENOMEM; - goto accept_err; - } - - mutex_lock_nested(&newcon->sock_mutex, 1); - - if (newcon->sock) { - struct connection *othercon = newcon->othercon; - - if (!othercon) { - othercon = kmem_cache_zalloc(con_cache, GFP_NOFS); - if (!othercon) { - log_print("failed to allocate incoming socket"); - mutex_unlock(&newcon->sock_mutex); - ret = -ENOMEM; - goto accept_err; - } - othercon->nodeid = nodeid; - othercon->rx_action = receive_from_sock; - mutex_init(&othercon->sock_mutex); - INIT_LIST_HEAD(&othercon->writequeue); - spin_lock_init(&othercon->writequeue_lock); - INIT_WORK(&othercon->swork, process_send_sockets); - INIT_WORK(&othercon->rwork, process_recv_sockets); - 
set_bit(CF_IS_OTHERCON, &othercon->flags); - } - mutex_lock_nested(&othercon->sock_mutex, 2); - if (!othercon->sock) { - newcon->othercon = othercon; - add_sock(newsock, othercon); - addcon = othercon; - mutex_unlock(&othercon->sock_mutex); - } else { - printk("Extra connection from node %d attempted\n", nodeid); - ret = -EAGAIN; - mutex_unlock(&othercon->sock_mutex); - mutex_unlock(&newcon->sock_mutex); - goto accept_err; - } - } else { - newcon->rx_action = receive_from_sock; - add_sock(newsock, newcon); - addcon = newcon; - } - - log_print("connected to %d", nodeid); - - mutex_unlock(&newcon->sock_mutex); - - /* - * Add it to the active queue in case we got data - * between processing the accept adding the socket - * to the read_sockets list - */ - if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags)) - queue_work(recv_workqueue, &addcon->rwork); - mutex_unlock(&con->sock_mutex); - - return 0; - -accept_err: - mutex_unlock(&con->sock_mutex); - if (newsock) - sock_release(newsock); - if (ret != -EAGAIN) - log_print("error accepting connection from node: %d", ret); - - return ret; -} - static void free_entry(struct writequeue_entry *e) { __free_page(e->page); @@ -999,6 +882,7 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed) static int sctp_bind_addrs(struct connection *con, uint16_t port) { struct sockaddr_storage localaddr; + struct sockaddr *addr = (struct sockaddr *)&localaddr; int i, addr_len, result = 0; for (i = 0; i < dlm_local_count; i++) { @@ -1006,13 +890,9 @@ static int sctp_bind_addrs(struct connection *con, uint16_t port) make_sockaddr(&localaddr, port, &addr_len); if (!i) - result = kernel_bind(con->sock, - (struct sockaddr *)&localaddr, - addr_len); + result = kernel_bind(con->sock, addr, addr_len); else - result = kernel_setsockopt(con->sock, SOL_SCTP, - SCTP_SOCKOPT_BINDX_ADD, - (char *)&localaddr, addr_len); + result = sock_bind_add(con->sock->sk, addr, addr_len); if (result < 0) { log_print("Can't bind to %d addr number %d, %d.\n", @@ -1031,11 +911,9 @@ static int sctp_bind_addrs(struct connection *con, uint16_t port) static void sctp_connect_to_sock(struct connection *con) { struct sockaddr_storage daddr; - int one = 1; int result; int addr_len; struct socket *sock; - struct __kernel_sock_timeval tv = { .tv_sec = 5, .tv_usec = 0 }; if (con->nodeid == 0) { log_print("attempt to connect sock 0 foiled"); @@ -1079,21 +957,17 @@ static void sctp_connect_to_sock(struct connection *con) log_print("connecting to %d", con->nodeid); /* Turn off Nagle's algorithm */ - kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one, - sizeof(one)); + sctp_sock_set_nodelay(sock->sk); /* * Make sock->ops->connect() function return in specified time, * since O_NONBLOCK argument in connect() function does not work here, * then, we should restore the default value of this attribute. 
*/ - kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv, - sizeof(tv)); + sock_set_sndtimeo(sock->sk, 5); result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len, 0); - memset(&tv, 0, sizeof(tv)); - kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv, - sizeof(tv)); + sock_set_sndtimeo(sock->sk, 0); if (result == -EINPROGRESS) result = 0; @@ -1132,7 +1006,6 @@ static void tcp_connect_to_sock(struct connection *con) struct sockaddr_storage saddr, src_addr; int addr_len; struct socket *sock = NULL; - int one = 1; int result; if (con->nodeid == 0) { @@ -1181,8 +1054,7 @@ static void tcp_connect_to_sock(struct connection *con) log_print("connecting to %d", con->nodeid); /* Turn off Nagle's algorithm */ - kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one, - sizeof(one)); + tcp_sock_set_nodelay(sock->sk); result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len, O_NONBLOCK); @@ -1224,7 +1096,6 @@ static struct socket *tcp_create_listen_sock(struct connection *con, { struct socket *sock = NULL; int result = 0; - int one = 1; int addr_len; if (dlm_local_addr[0]->ss_family == AF_INET) @@ -1241,19 +1112,14 @@ static struct socket *tcp_create_listen_sock(struct connection *con, } /* Turn off Nagle's algorithm */ - kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one, - sizeof(one)); + tcp_sock_set_nodelay(sock->sk); - result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&one, sizeof(one)); + sock_set_reuseaddr(sock->sk); - if (result < 0) { - log_print("Failed to set SO_REUSEADDR on socket: %d", result); - } write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_user_data = con; save_listen_callbacks(sock); - con->rx_action = tcp_accept_from_sock; + con->rx_action = accept_from_sock; con->connect_action = tcp_connect_to_sock; write_unlock_bh(&sock->sk->sk_callback_lock); @@ -1267,11 +1133,7 @@ static struct socket *tcp_create_listen_sock(struct connection *con, con->sock = NULL; goto create_out; } - result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&one, sizeof(one)); - if (result < 0) { - log_print("Set keepalive failed: %d", result); - } + sock_set_keepalive(sock->sk); result = sock->ops->listen(sock, 5); if (result < 0) { @@ -1309,8 +1171,6 @@ static int sctp_listen_for_all(void) struct socket *sock = NULL; int result = -EINVAL; struct connection *con = nodeid2con(0, GFP_NOFS); - int bufsize = NEEDED_RMEM; - int one = 1; if (!con) return -ENOMEM; @@ -1324,15 +1184,8 @@ static int sctp_listen_for_all(void) goto out; } - result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE, - (char *)&bufsize, sizeof(bufsize)); - if (result) - log_print("Error increasing buffer space on socket %d", result); - - result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one, - sizeof(one)); - if (result < 0) - log_print("Could not set SCTP NODELAY error %d\n", result); + sock_set_rcvbuf(sock->sk, NEEDED_RMEM); + sctp_sock_set_nodelay(sock->sk); write_lock_bh(&sock->sk->sk_callback_lock); /* Init con struct */ @@ -1340,7 +1193,7 @@ static int sctp_listen_for_all(void) save_listen_callbacks(sock); con->sock = sock; con->sock->sk->sk_data_ready = lowcomms_data_ready; - con->rx_action = sctp_accept_from_sock; + con->rx_action = accept_from_sock; con->connect_action = sctp_connect_to_sock; write_unlock_bh(&sock->sk->sk_callback_lock); diff --git a/fs/drop_caches.c b/fs/drop_caches.c index dc1a1d5d825b..f00fcc4a4f72 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -47,7 +47,7 @@ static void 
drop_pagecache_sb(struct super_block *sb, void *unused) } int drop_caches_sysctl_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos) + void *buffer, size_t *length, loff_t *ppos) { int ret; diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig index 522c35d5292b..1bdeaa6d5790 100644 --- a/fs/ecryptfs/Kconfig +++ b/fs/ecryptfs/Kconfig @@ -7,7 +7,7 @@ config ECRYPT_FS select CRYPTO_MD5 help Encrypted filesystem that operates on the VFS layer. See - <file:Documentation/filesystems/ecryptfs.txt> to learn more about + <file:Documentation/filesystems/ecryptfs.rst> to learn more about eCryptfs. Userspace components are required and can be obtained from <http://ecryptfs.sf.net>. diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 2c449aed1b92..0681540c48d9 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -48,18 +48,6 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size) } } -static int ecryptfs_hash_digest(struct crypto_shash *tfm, - char *src, int len, char *dst) -{ - SHASH_DESC_ON_STACK(desc, tfm); - int err; - - desc->tfm = tfm; - err = crypto_shash_digest(desc, src, len, dst); - shash_desc_zero(desc); - return err; -} - /** * ecryptfs_calculate_md5 - calculates the md5 of @src * @dst: Pointer to 16 bytes of allocated memory @@ -74,11 +62,8 @@ static int ecryptfs_calculate_md5(char *dst, struct ecryptfs_crypt_stat *crypt_stat, char *src, int len) { - struct crypto_shash *tfm; - int rc = 0; + int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst); - tfm = crypt_stat->hash_tfm; - rc = ecryptfs_hash_digest(tfm, src, len, dst); if (rc) { printk(KERN_ERR "%s: Error computing crypto hash; rc = [%d]\n", diff --git a/fs/erofs/data.c b/fs/erofs/data.c index fc3a8d8064f8..64b56c7df023 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -265,7 +265,7 @@ submit_bio_out: */ static int erofs_raw_access_readpage(struct file *file, struct page *page) { - erofs_off_t last_block; + erofs_off_t uninitialized_var(last_block); struct bio *bio; trace_erofs_readpage(page, true); @@ -280,47 +280,36 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page) return 0; } -static int erofs_raw_access_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) +static void erofs_raw_access_readahead(struct readahead_control *rac) { - erofs_off_t last_block; + erofs_off_t uninitialized_var(last_block); struct bio *bio = NULL; - gfp_t gfp = readahead_gfp_mask(mapping); - struct page *page = list_last_entry(pages, struct page, lru); - - trace_erofs_readpages(mapping->host, page, nr_pages, true); + struct page *page; - for (; nr_pages; --nr_pages) { - page = list_entry(pages->prev, struct page, lru); + trace_erofs_readpages(rac->mapping->host, readahead_index(rac), + readahead_count(rac), true); + while ((page = readahead_page(rac))) { prefetchw(&page->flags); - list_del(&page->lru); - if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { - bio = erofs_read_raw_page(bio, mapping, page, - &last_block, nr_pages, true); + bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block, + readahead_count(rac), true); - /* all the page errors are ignored when readahead */ - if (IS_ERR(bio)) { - pr_err("%s, readahead error at page %lu of nid %llu\n", - __func__, page->index, - EROFS_I(mapping->host)->nid); + /* all the page errors are ignored when readahead */ + if (IS_ERR(bio)) { + pr_err("%s, readahead error at page %lu of nid %llu\n", + __func__, 
page->index, + EROFS_I(rac->mapping->host)->nid); - bio = NULL; - } + bio = NULL; } - /* pages could still be locked */ put_page(page); } - DBG_BUGON(!list_empty(pages)); /* the rare case (end in gaps) */ if (bio) submit_bio(bio); - return 0; } static int erofs_get_block(struct inode *inode, sector_t iblock, @@ -358,7 +347,7 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block) /* for uncompressed (aligned) files and raw access for other files */ const struct address_space_operations erofs_raw_access_aops = { .readpage = erofs_raw_access_readpage, - .readpages = erofs_raw_access_readpages, + .readahead = erofs_raw_access_readahead, .bmap = erofs_bmap, }; diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 5d2d81940679..7628816f2453 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -274,7 +274,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq, i = 0; while (1) { - dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL); + dst = vm_map_ram(rq->out, nrpages_out, -1); /* retry two more times (totally 3 times) */ if (dst || ++i >= 3) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 3350ab65d892..7dd4bbe9674f 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -311,27 +311,21 @@ int erofs_getattr(const struct path *path, struct kstat *stat, const struct inode_operations erofs_generic_iops = { .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, -#endif .get_acl = erofs_get_acl, }; const struct inode_operations erofs_symlink_iops = { .get_link = page_get_link, .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, -#endif .get_acl = erofs_get_acl, }; const struct inode_operations erofs_fast_symlink_iops = { .get_link = simple_get_link, .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, -#endif .get_acl = erofs_get_acl, }; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 5eead7fdc7a6..1c077b7bb43d 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -46,6 +46,17 @@ typedef u64 erofs_off_t; /* data type for filesystem-wide blocks number */ typedef u32 erofs_blk_t; +struct erofs_fs_context { +#ifdef CONFIG_EROFS_FS_ZIP + /* current strategy of how to use managed cache */ + unsigned char cache_strategy; + + /* threshold for decompression synchronously */ + unsigned int max_sync_decompress_pages; +#endif + unsigned int mount_opt; +}; + struct erofs_sb_info { #ifdef CONFIG_EROFS_FS_ZIP /* list for all registered superblocks, mainly for shrinker */ @@ -55,14 +66,8 @@ struct erofs_sb_info { /* managed XArray arranged in physical block number */ struct xarray managed_pslots; - /* threshold for decompression synchronously */ - unsigned int max_sync_decompress_pages; - unsigned int shrinker_run_no; - /* current strategy of how to use managed cache */ - unsigned char cache_strategy; - /* pseudo inode to manage cached pages */ struct inode *managed_cache; #endif /* CONFIG_EROFS_FS_ZIP */ @@ -88,7 +93,7 @@ struct erofs_sb_info { u32 feature_compat; u32 feature_incompat; - unsigned int mount_opt; + struct erofs_fs_context ctx; /* options */ }; #define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) @@ -98,17 +103,17 @@ struct erofs_sb_info { #define EROFS_MOUNT_XATTR_USER 0x00000010 #define EROFS_MOUNT_POSIX_ACL 0x00000020 -#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) -#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) -#define test_opt(sbi, 
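The erofs hunks above convert the raw-access path from the list-based ->readpages() to the ->readahead() address_space operation: the pages are already in the page cache when the filesystem is called, readahead_page() hands them out one at a time, and readahead_index()/readahead_count()/readahead_pos() describe the whole request up front. A rough skeleton of the new calling convention, with the actual I/O elided and hypothetical names:

#include <linux/fs.h>
#include <linux/pagemap.h>

static void example_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		/*
		 * Each page comes back locked and with a reference held;
		 * the filesystem queues read I/O here and unlocks the page
		 * when that I/O completes.
		 */
		/* ... add the page to a bio and submit it ... */
		put_page(page);	/* drop the reference taken by readahead_page() */
	}
}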
option) ((sbi)->mount_opt & EROFS_MOUNT_##option) +#define clear_opt(ctx, option) ((ctx)->mount_opt &= ~EROFS_MOUNT_##option) +#define set_opt(ctx, option) ((ctx)->mount_opt |= EROFS_MOUNT_##option) +#define test_opt(ctx, option) ((ctx)->mount_opt & EROFS_MOUNT_##option) -#ifdef CONFIG_EROFS_FS_ZIP enum { EROFS_ZIP_CACHE_DISABLED, EROFS_ZIP_CACHE_READAHEAD, EROFS_ZIP_CACHE_READAROUND }; +#ifdef CONFIG_EROFS_FS_ZIP #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL) /* basic unit of the workstation of a super_block */ diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index 3abbecbf73de..52f201e03c62 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -244,9 +244,7 @@ static struct dentry *erofs_lookup(struct inode *dir, const struct inode_operations erofs_dir_iops = { .lookup = erofs_lookup, .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, -#endif .get_acl = erofs_get_acl, }; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index b514c67e5fc2..7a13ffb07c23 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -10,6 +10,8 @@ #include <linux/parser.h> #include <linux/seq_file.h> #include <linux/crc32c.h> +#include <linux/fs_context.h> +#include <linux/fs_parser.h> #include "xattr.h" #define CREATE_TRACE_POINTS @@ -192,53 +194,18 @@ out: return ret; } -#ifdef CONFIG_EROFS_FS_ZIP -static int erofs_build_cache_strategy(struct super_block *sb, - substring_t *args) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - const char *cs = match_strdup(args); - int err = 0; - - if (!cs) { - erofs_err(sb, "Not enough memory to store cache strategy"); - return -ENOMEM; - } - - if (!strcmp(cs, "disabled")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; - } else if (!strcmp(cs, "readahead")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; - } else if (!strcmp(cs, "readaround")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - } else { - erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs); - err = -EINVAL; - } - kfree(cs); - return err; -} -#else -static int erofs_build_cache_strategy(struct super_block *sb, - substring_t *args) -{ - erofs_info(sb, "EROFS compression is disabled, so cache strategy is ignored"); - return 0; -} -#endif - /* set up default EROFS parameters */ -static void erofs_default_options(struct erofs_sb_info *sbi) +static void erofs_default_options(struct erofs_fs_context *ctx) { #ifdef CONFIG_EROFS_FS_ZIP - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - sbi->max_sync_decompress_pages = 3; + ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + ctx->max_sync_decompress_pages = 3; #endif #ifdef CONFIG_EROFS_FS_XATTR - set_opt(sbi, XATTR_USER); + set_opt(ctx, XATTR_USER); #endif #ifdef CONFIG_EROFS_FS_POSIX_ACL - set_opt(sbi, POSIX_ACL); + set_opt(ctx, POSIX_ACL); #endif } @@ -251,73 +218,62 @@ enum { Opt_err }; -static match_table_t erofs_tokens = { - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_cache_strategy, "cache_strategy=%s"}, - {Opt_err, NULL} +static const struct constant_table erofs_param_cache_strategy[] = { + {"disabled", EROFS_ZIP_CACHE_DISABLED}, + {"readahead", EROFS_ZIP_CACHE_READAHEAD}, + {"readaround", EROFS_ZIP_CACHE_READAROUND}, + {} }; -static int erofs_parse_options(struct super_block *sb, char *options) -{ - substring_t args[MAX_OPT_ARGS]; - char *p; - int err; - - if (!options) - return 0; - - while ((p = strsep(&options, ","))) { - int token; +static const struct fs_parameter_spec erofs_fs_parameters[] = { + 
fsparam_flag_no("user_xattr", Opt_user_xattr), + fsparam_flag_no("acl", Opt_acl), + fsparam_enum("cache_strategy", Opt_cache_strategy, + erofs_param_cache_strategy), + {} +}; - if (!*p) - continue; +static int erofs_fc_parse_param(struct fs_context *fc, + struct fs_parameter *param) +{ + struct erofs_fs_context *ctx __maybe_unused = fc->fs_private; + struct fs_parse_result result; + int opt; - args[0].to = args[0].from = NULL; - token = match_token(p, erofs_tokens, args); + opt = fs_parse(fc, erofs_fs_parameters, param, &result); + if (opt < 0) + return opt; - switch (token) { + switch (opt) { + case Opt_user_xattr: #ifdef CONFIG_EROFS_FS_XATTR - case Opt_user_xattr: - set_opt(EROFS_SB(sb), XATTR_USER); - break; - case Opt_nouser_xattr: - clear_opt(EROFS_SB(sb), XATTR_USER); - break; + if (result.boolean) + set_opt(ctx, XATTR_USER); + else + clear_opt(ctx, XATTR_USER); #else - case Opt_user_xattr: - erofs_info(sb, "user_xattr options not supported"); - break; - case Opt_nouser_xattr: - erofs_info(sb, "nouser_xattr options not supported"); - break; + errorfc(fc, "{,no}user_xattr options not supported"); #endif + break; + case Opt_acl: #ifdef CONFIG_EROFS_FS_POSIX_ACL - case Opt_acl: - set_opt(EROFS_SB(sb), POSIX_ACL); - break; - case Opt_noacl: - clear_opt(EROFS_SB(sb), POSIX_ACL); - break; + if (result.boolean) + set_opt(ctx, POSIX_ACL); + else + clear_opt(ctx, POSIX_ACL); #else - case Opt_acl: - erofs_info(sb, "acl options not supported"); - break; - case Opt_noacl: - erofs_info(sb, "noacl options not supported"); - break; + errorfc(fc, "{,no}acl options not supported"); #endif - case Opt_cache_strategy: - err = erofs_build_cache_strategy(sb, args); - if (err) - return err; - break; - default: - erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p); - return -EINVAL; - } + break; + case Opt_cache_strategy: +#ifdef CONFIG_EROFS_FS_ZIP + ctx->cache_strategy = result.uint_32; +#else + errorfc(fc, "compression not supported, cache_strategy ignored"); +#endif + break; + default: + return -ENOPARAM; } return 0; } @@ -381,10 +337,11 @@ static int erofs_init_managed_cache(struct super_block *sb) static int erofs_init_managed_cache(struct super_block *sb) { return 0; } #endif -static int erofs_fill_super(struct super_block *sb, void *data, int silent) +static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode; struct erofs_sb_info *sbi; + struct erofs_fs_context *ctx = fc->fs_private; int err; sb->s_magic = EROFS_SUPER_MAGIC; @@ -408,22 +365,15 @@ static int erofs_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = 1; sb->s_op = &erofs_sops; - -#ifdef CONFIG_EROFS_FS_XATTR sb->s_xattr = erofs_xattr_handlers; -#endif - /* set erofs default mount options */ - erofs_default_options(sbi); - err = erofs_parse_options(sb, data); - if (err) - return err; - - if (test_opt(sbi, POSIX_ACL)) + if (test_opt(ctx, POSIX_ACL)) sb->s_flags |= SB_POSIXACL; else sb->s_flags &= ~SB_POSIXACL; + sbi->ctx = *ctx; + #ifdef CONFIG_EROFS_FS_ZIP xa_init(&sbi->managed_pslots); #endif @@ -450,15 +400,58 @@ static int erofs_fill_super(struct super_block *sb, void *data, int silent) if (err) return err; - erofs_info(sb, "mounted with opts: %s, root inode @ nid %llu.", - (char *)data, ROOT_NID(sbi)); + erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi)); + return 0; +} + +static int erofs_fc_get_tree(struct fs_context *fc) +{ + return get_tree_bdev(fc, erofs_fc_fill_super); +} + +static int erofs_fc_reconfigure(struct fs_context 
*fc) +{ + struct super_block *sb = fc->root->d_sb; + struct erofs_sb_info *sbi = EROFS_SB(sb); + struct erofs_fs_context *ctx = fc->fs_private; + + DBG_BUGON(!sb_rdonly(sb)); + + if (test_opt(ctx, POSIX_ACL)) + fc->sb_flags |= SB_POSIXACL; + else + fc->sb_flags &= ~SB_POSIXACL; + + sbi->ctx = *ctx; + + fc->sb_flags |= SB_RDONLY; return 0; } -static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) +static void erofs_fc_free(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); + kfree(fc->fs_private); +} + +static const struct fs_context_operations erofs_context_ops = { + .parse_param = erofs_fc_parse_param, + .get_tree = erofs_fc_get_tree, + .reconfigure = erofs_fc_reconfigure, + .free = erofs_fc_free, +}; + +static int erofs_init_fs_context(struct fs_context *fc) +{ + fc->fs_private = kzalloc(sizeof(struct erofs_fs_context), GFP_KERNEL); + if (!fc->fs_private) + return -ENOMEM; + + /* set default mount options */ + erofs_default_options(fc->fs_private); + + fc->ops = &erofs_context_ops; + + return 0; } /* @@ -497,7 +490,7 @@ static void erofs_put_super(struct super_block *sb) static struct file_system_type erofs_fs_type = { .owner = THIS_MODULE, .name = "erofs", - .mount = erofs_mount, + .init_fs_context = erofs_init_fs_context, .kill_sb = erofs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; @@ -578,61 +571,37 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) static int erofs_show_options(struct seq_file *seq, struct dentry *root) { struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); + struct erofs_fs_context *ctx __maybe_unused = &sbi->ctx; #ifdef CONFIG_EROFS_FS_XATTR - if (test_opt(sbi, XATTR_USER)) + if (test_opt(ctx, XATTR_USER)) seq_puts(seq, ",user_xattr"); else seq_puts(seq, ",nouser_xattr"); #endif #ifdef CONFIG_EROFS_FS_POSIX_ACL - if (test_opt(sbi, POSIX_ACL)) + if (test_opt(ctx, POSIX_ACL)) seq_puts(seq, ",acl"); else seq_puts(seq, ",noacl"); #endif #ifdef CONFIG_EROFS_FS_ZIP - if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { + if (ctx->cache_strategy == EROFS_ZIP_CACHE_DISABLED) seq_puts(seq, ",cache_strategy=disabled"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { + else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) seq_puts(seq, ",cache_strategy=readahead"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { + else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAROUND) seq_puts(seq, ",cache_strategy=readaround"); - } #endif return 0; } -static int erofs_remount(struct super_block *sb, int *flags, char *data) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - unsigned int org_mnt_opt = sbi->mount_opt; - int err; - - DBG_BUGON(!sb_rdonly(sb)); - err = erofs_parse_options(sb, data); - if (err) - goto out; - - if (test_opt(sbi, POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - - *flags |= SB_RDONLY; - return 0; -out: - sbi->mount_opt = org_mnt_opt; - return err; -} - const struct super_operations erofs_sops = { .put_super = erofs_put_super, .alloc_inode = erofs_alloc_inode, .free_inode = erofs_free_inode, .statfs = erofs_statfs, .show_options = erofs_show_options, - .remount_fs = erofs_remount, }; module_init(erofs_module_init); diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index b766c3ee5fa8..87e437e7b34f 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -422,7 +422,7 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) static bool 
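The super.c changes above move erofs from mount_bdev()/remount_fs to the fs_context API: mount options are described by a fs_parameter_spec table, parsed one at a time in ->parse_param(), and applied from ->get_tree() or ->reconfigure(). Reduced to a skeleton with hypothetical "myfs" names and a single flag option, such a conversion looks roughly like this (the fill_super body is omitted):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/slab.h>

struct myfs_context {			/* per-mount options collected during parsing */
	bool verbose;
};

enum { Opt_verbose };

static const struct fs_parameter_spec myfs_fs_parameters[] = {
	fsparam_flag_no("verbose", Opt_verbose),	/* accepts "verbose" and "noverbose" */
	{}
};

static int myfs_fill_super(struct super_block *sb, struct fs_context *fc);	/* body not shown */

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct myfs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt = fs_parse(fc, myfs_fs_parameters, param, &result);

	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_verbose:
		ctx->verbose = result.boolean;	/* false for the "no" variant */
		break;
	default:
		return -ENOPARAM;
	}
	return 0;
}

static int myfs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, myfs_fill_super);
}

static void myfs_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations myfs_context_ops = {
	.parse_param	= myfs_parse_param,
	.get_tree	= myfs_get_tree,
	.free		= myfs_free_fc,
};

static int myfs_init_fs_context(struct fs_context *fc)
{
	fc->fs_private = kzalloc(sizeof(struct myfs_context), GFP_KERNEL);
	if (!fc->fs_private)
		return -ENOMEM;
	fc->ops = &myfs_context_ops;
	return 0;
}

The file_system_type then points .init_fs_context at myfs_init_fs_context instead of providing a .mount method, exactly as the erofs_fs_type hunk above does.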
erofs_xattr_user_list(struct dentry *dentry) { - return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); + return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER); } static bool erofs_xattr_trusted_list(struct dentry *dentry) @@ -469,7 +469,7 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, switch (handler->flags) { case EROFS_XATTR_INDEX_USER: - if (!test_opt(sbi, XATTR_USER)) + if (!test_opt(&sbi->ctx, XATTR_USER)) return -EOPNOTSUPP; break; case EROFS_XATTR_INDEX_TRUSTED: diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h index 50966f1c676e..e4e5093f012c 100644 --- a/fs/erofs/xattr.h +++ b/fs/erofs/xattr.h @@ -76,11 +76,8 @@ static inline int erofs_getxattr(struct inode *inode, int index, return -EOPNOTSUPP; } -static inline ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - return -EOPNOTSUPP; -} +#define erofs_listxattr (NULL) +#define erofs_xattr_handlers (NULL) #endif /* !CONFIG_EROFS_FS_XATTR */ #ifdef CONFIG_EROFS_FS_POSIX_ACL diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index c4b6c9aa87ec..be50a4d9d273 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -615,7 +615,7 @@ restart_now: goto err_out; /* preload all compressed pages (maybe downgrade role if necessary) */ - if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) + if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la)) cache_strategy = DELAYEDALLOC; else cache_strategy = DONTALLOC; @@ -1302,31 +1302,26 @@ static int z_erofs_readpage(struct file *file, struct page *page) static bool should_decompress_synchronously(struct erofs_sb_info *sbi, unsigned int nr) { - return nr <= sbi->max_sync_decompress_pages; + return nr <= sbi->ctx.max_sync_decompress_pages; } -static int z_erofs_readpages(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned int nr_pages) +static void z_erofs_readahead(struct readahead_control *rac) { - struct inode *const inode = mapping->host; + struct inode *const inode = rac->mapping->host; struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - bool sync = should_decompress_synchronously(sbi, nr_pages); + bool sync = should_decompress_synchronously(sbi, readahead_count(rac)); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); - struct page *head = NULL; + struct page *page, *head = NULL; LIST_HEAD(pagepool); - trace_erofs_readpages(mapping->host, lru_to_page(pages), - nr_pages, false); + trace_erofs_readpages(inode, readahead_index(rac), + readahead_count(rac), false); - f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; - - for (; nr_pages; --nr_pages) { - struct page *page = lru_to_page(pages); + f.headoffset = readahead_pos(rac); + while ((page = readahead_page(rac))) { prefetchw(&page->flags); - list_del(&page->lru); /* * A pure asynchronous readahead is indicated if @@ -1335,11 +1330,6 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping, */ sync &= !(PageReadahead(page) && !head); - if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { - list_add(&page->lru, &pagepool); - continue; - } - set_page_private(page, (unsigned long)head); head = page; } @@ -1368,11 +1358,10 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping, /* clean up the remaining free pages */ put_pages_list(&pagepool); - return 0; } const struct address_space_operations z_erofs_aops = { .readpage = z_erofs_readpage, - .readpages = 
z_erofs_readpages, + .readahead = z_erofs_readahead, }; diff --git a/fs/exec.c b/fs/exec.c index 2c465119affc..93ff1c4c7ebb 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -72,6 +72,8 @@ #include <trace/events/sched.h> +static int bprm_creds_from_file(struct linux_binprm *bprm); + int suid_dumpable = 0; static LIST_HEAD(formats); @@ -588,24 +590,48 @@ out: } /* - * Like copy_strings, but get argv and its values from kernel memory. + * Copy and argument/environment string from the kernel to the processes stack. */ -int copy_strings_kernel(int argc, const char *const *__argv, - struct linux_binprm *bprm) +int copy_string_kernel(const char *arg, struct linux_binprm *bprm) { - int r; - mm_segment_t oldfs = get_fs(); - struct user_arg_ptr argv = { - .ptr.native = (const char __user *const __user *)__argv, - }; + int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */; + unsigned long pos = bprm->p; + + if (len == 0) + return -EFAULT; + if (!valid_arg_len(bprm, len)) + return -E2BIG; + + /* We're going to work our way backwards. */ + arg += len; + bprm->p -= len; + if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin) + return -E2BIG; - set_fs(KERNEL_DS); - r = copy_strings(argc, argv, bprm); - set_fs(oldfs); + while (len > 0) { + unsigned int bytes_to_copy = min_t(unsigned int, len, + min_not_zero(offset_in_page(pos), PAGE_SIZE)); + struct page *page; + char *kaddr; + + pos -= bytes_to_copy; + arg -= bytes_to_copy; + len -= bytes_to_copy; + + page = get_arg_page(bprm, pos, 1); + if (!page) + return -E2BIG; + kaddr = kmap_atomic(page); + flush_arg_page(bprm, pos & PAGE_MASK, page); + memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy); + flush_kernel_dcache_page(page); + kunmap_atomic(kaddr); + put_arg_page(page); + } - return r; + return 0; } -EXPORT_SYMBOL(copy_strings_kernel); +EXPORT_SYMBOL(copy_string_kernel); #ifdef CONFIG_MMU @@ -1051,13 +1077,14 @@ static int exec_mmap(struct mm_struct *mm) tsk = current; old_mm = current->mm; exec_mm_release(tsk, old_mm); + if (old_mm) + sync_mm_rss(old_mm); ret = mutex_lock_killable(&tsk->signal->exec_update_mutex); if (ret) return ret; if (old_mm) { - sync_mm_rss(old_mm); /* * Make sure that if there is a core dump in progress * for the old mm, we get out and die instead of going @@ -1093,12 +1120,6 @@ static int exec_mmap(struct mm_struct *mm) return 0; } -/* - * This function makes sure the current process has its own signal table, - * so that flush_signal_handlers can later reset the handlers without - * disturbing other processes. (Other processes might share the signal - * table via the CLONE_SIGHAND option to clone().) - */ static int de_thread(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; @@ -1176,7 +1197,6 @@ static int de_thread(struct task_struct *tsk) tsk->start_boottime = leader->start_boottime; BUG_ON(!same_thread_group(leader, tsk)); - BUG_ON(has_group_leader_pid(tsk)); /* * An exec() starts a new thread group with the * TGID of the previous thread group. Rehash the @@ -1186,11 +1206,8 @@ static int de_thread(struct task_struct *tsk) /* Become a process group leader with the old leader's pid. * The old leader becomes a thread of the this thread group. - * Note: The old leader also uses this pid until release_task - * is called. Odd but simple and correct. 
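The exec.c hunk above replaces copy_strings_kernel(), which temporarily switched to KERNEL_DS with set_fs(), by copy_string_kernel(), which copies one kernel string straight into the argument pages with kmap_atomic()/memcpy(). Call sites that used to pass a one-element array now pass the string itself; the change to __do_execve_file() later in this patch is the typical shape:

	/* before */
	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	/* after */
	retval = copy_string_kernel(bprm->filename, bprm);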
*/ - tsk->pid = leader->pid; - change_pid(tsk, PIDTYPE_PID, task_pid(leader)); + exchange_tids(tsk, leader); transfer_pid(leader, tsk, PIDTYPE_TGID); transfer_pid(leader, tsk, PIDTYPE_PGID); transfer_pid(leader, tsk, PIDTYPE_SID); @@ -1240,6 +1257,12 @@ killed: } +/* + * This function makes sure the current process has its own signal table, + * so that flush_signal_handlers can later reset the handlers without + * disturbing other processes. (Other processes might share the signal + * table via the CLONE_SIGHAND option to clone().) + */ static int unshare_sighand(struct task_struct *me) { struct sighand_struct *oldsighand = me->sighand; @@ -1296,13 +1319,23 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec) * Calling this is the point of no return. None of the failures will be * seen by userspace since either the process is already taking a fatal * signal (via de_thread() or coredump), or will have SEGV raised - * (after exec_mmap()) by search_binary_handlers (see below). + * (after exec_mmap()) by search_binary_handler (see below). */ -int flush_old_exec(struct linux_binprm * bprm) +int begin_new_exec(struct linux_binprm * bprm) { struct task_struct *me = current; int retval; + /* Once we are committed compute the creds */ + retval = bprm_creds_from_file(bprm); + if (retval) + return retval; + + /* + * Ensure all future errors are fatal. + */ + bprm->point_of_no_return = true; + /* * Make this the only thread in the thread group. */ @@ -1317,7 +1350,10 @@ int flush_old_exec(struct linux_binprm * bprm) */ set_mm_exe_file(bprm->mm, bprm->file); + /* If the binary is not readable then enforce mm->dumpable=0 */ would_dump(bprm, bprm->file); + if (bprm->have_execfd) + would_dump(bprm, bprm->executable); /* * Release all of the old mmap stuff @@ -1327,13 +1363,6 @@ int flush_old_exec(struct linux_binprm * bprm) if (retval) goto out; - /* - * After setting bprm->called_exec_mmap (to mark that current is - * using the prepared mm now), we have nothing left of the original - * process. If anything from here on returns an error, the check - * in search_binary_handler() will SEGV current. - */ - bprm->called_exec_mmap = 1; bprm->mm = NULL; #ifdef CONFIG_POSIX_TIMERS @@ -1346,7 +1375,7 @@ int flush_old_exec(struct linux_binprm * bprm) */ retval = unshare_sighand(me); if (retval) - goto out; + goto out_unlock; set_fs(USER_DS); me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | @@ -1361,12 +1390,84 @@ int flush_old_exec(struct linux_binprm * bprm) * undergoing exec(2). */ do_close_on_exec(me->files); + + if (bprm->secureexec) { + /* Make sure parent cannot signal privileged process. */ + me->pdeath_signal = 0; + + /* + * For secureexec, reset the stack limit to sane default to + * avoid bad behavior from the prior rlimits. This has to + * happen before arch_pick_mmap_layout(), which examines + * RLIMIT_STACK, but after the point of no return to avoid + * needing to clean up the change on failure. + */ + if (bprm->rlim_stack.rlim_cur > _STK_LIM) + bprm->rlim_stack.rlim_cur = _STK_LIM; + } + + me->sas_ss_sp = me->sas_ss_size = 0; + + /* + * Figure out dumpability. Note that this checking only of current + * is wrong, but userspace depends on it. This should be testing + * bprm->secureexec instead. 
+ */ + if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || + !(uid_eq(current_euid(), current_uid()) && + gid_eq(current_egid(), current_gid()))) + set_dumpable(current->mm, suid_dumpable); + else + set_dumpable(current->mm, SUID_DUMP_USER); + + perf_event_exec(); + __set_task_comm(me, kbasename(bprm->filename), true); + + /* An exec changes our domain. We are no longer part of the thread + group */ + WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1); + flush_signal_handlers(me, 0); + + /* + * install the new credentials for this executable + */ + security_bprm_committing_creds(bprm); + + commit_creds(bprm->cred); + bprm->cred = NULL; + + /* + * Disable monitoring for regular users + * when executing setuid binaries. Must + * wait until new credentials are committed + * by commit_creds() above + */ + if (get_dumpable(me->mm) != SUID_DUMP_USER) + perf_event_exit_task(me); + /* + * cred_guard_mutex must be held at least to this point to prevent + * ptrace_attach() from altering our determination of the task's + * credentials; any time after this it may be unlocked. + */ + security_bprm_committed_creds(bprm); + + /* Pass the opened binary to the interpreter. */ + if (bprm->have_execfd) { + retval = get_unused_fd_flags(0); + if (retval < 0) + goto out_unlock; + fd_install(retval, bprm->executable); + bprm->executable = NULL; + bprm->execfd = retval; + } return 0; +out_unlock: + mutex_unlock(&me->signal->exec_update_mutex); out: return retval; } -EXPORT_SYMBOL(flush_old_exec); +EXPORT_SYMBOL(begin_new_exec); void would_dump(struct linux_binprm *bprm, struct file *file) { @@ -1391,58 +1492,20 @@ EXPORT_SYMBOL(would_dump); void setup_new_exec(struct linux_binprm * bprm) { - /* - * Once here, prepare_binrpm() will not be called any more, so - * the final state of setuid/setgid/fscaps can be merged into the - * secureexec flag. - */ - bprm->secureexec |= bprm->cap_elevated; - - if (bprm->secureexec) { - /* Make sure parent cannot signal privileged process. */ - current->pdeath_signal = 0; - - /* - * For secureexec, reset the stack limit to sane default to - * avoid bad behavior from the prior rlimits. This has to - * happen before arch_pick_mmap_layout(), which examines - * RLIMIT_STACK, but after the point of no return to avoid - * needing to clean up the change on failure. - */ - if (bprm->rlim_stack.rlim_cur > _STK_LIM) - bprm->rlim_stack.rlim_cur = _STK_LIM; - } - - arch_pick_mmap_layout(current->mm, &bprm->rlim_stack); - - current->sas_ss_sp = current->sas_ss_size = 0; + /* Setup things that can depend upon the personality */ + struct task_struct *me = current; - /* - * Figure out dumpability. Note that this checking only of current - * is wrong, but userspace depends on it. This should be testing - * bprm->secureexec instead. - */ - if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || - !(uid_eq(current_euid(), current_uid()) && - gid_eq(current_egid(), current_gid()))) - set_dumpable(current->mm, suid_dumpable); - else - set_dumpable(current->mm, SUID_DUMP_USER); + arch_pick_mmap_layout(me->mm, &bprm->rlim_stack); arch_setup_new_exec(); - perf_event_exec(); - __set_task_comm(current, kbasename(bprm->filename), true); /* Set the new mm task size. We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on * some architectures like powerpc */ - current->mm->task_size = TASK_SIZE; - - /* An exec changes our domain. 
We are no longer part of the thread - group */ - WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1); - flush_signal_handlers(current, 0); + me->mm->task_size = TASK_SIZE; + mutex_unlock(&me->signal->exec_update_mutex); + mutex_unlock(&me->signal->cred_guard_mutex); } EXPORT_SYMBOL(setup_new_exec); @@ -1458,7 +1521,7 @@ EXPORT_SYMBOL(finalize_exec); /* * Prepare credentials and lock ->cred_guard_mutex. - * install_exec_creds() commits the new creds and drops the lock. + * setup_new_exec() commits the new creds and drops the lock. * Or, if exec fails before, free_bprm() should release ->cred and * and unlock. */ @@ -1479,8 +1542,6 @@ static void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { - if (bprm->called_exec_mmap) - mutex_unlock(&current->signal->exec_update_mutex); mutex_unlock(&current->signal->cred_guard_mutex); abort_creds(bprm->cred); } @@ -1488,6 +1549,8 @@ static void free_bprm(struct linux_binprm *bprm) allow_write_access(bprm->file); fput(bprm->file); } + if (bprm->executable) + fput(bprm->executable); /* If a binfmt changed the interp, free it. */ if (bprm->interp != bprm->filename) kfree(bprm->interp); @@ -1507,35 +1570,6 @@ int bprm_change_interp(const char *interp, struct linux_binprm *bprm) EXPORT_SYMBOL(bprm_change_interp); /* - * install the new credentials for this executable - */ -void install_exec_creds(struct linux_binprm *bprm) -{ - security_bprm_committing_creds(bprm); - - commit_creds(bprm->cred); - bprm->cred = NULL; - - /* - * Disable monitoring for regular users - * when executing setuid binaries. Must - * wait until new credentials are committed - * by commit_creds() above - */ - if (get_dumpable(current->mm) != SUID_DUMP_USER) - perf_event_exit_task(current); - /* - * cred_guard_mutex must be held at least to this point to prevent - * ptrace_attach() from altering our determination of the task's - * credentials; any time after this it may be unlocked. - */ - security_bprm_committed_creds(bprm); - mutex_unlock(&current->signal->exec_update_mutex); - mutex_unlock(&current->signal->cred_guard_mutex); -} -EXPORT_SYMBOL(install_exec_creds); - -/* * determine how safe it is to execute the proposed program * - the caller must hold ->cred_guard_mutex to protect against * PTRACE_ATTACH or seccomp thread-sync @@ -1572,29 +1606,21 @@ static void check_unsafe_exec(struct linux_binprm *bprm) spin_unlock(&p->fs->lock); } -static void bprm_fill_uid(struct linux_binprm *bprm) +static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file) { + /* Handle suid and sgid on files */ struct inode *inode; unsigned int mode; kuid_t uid; kgid_t gid; - /* - * Since this can be called multiple times (via prepare_binprm), - * we must clear any previous work done when setting set[ug]id - * bits from any earlier bprm->file uses (for example when run - * first for a setuid script then again for its interpreter). - */ - bprm->cred->euid = current_euid(); - bprm->cred->egid = current_egid(); - - if (!mnt_may_suid(bprm->file->f_path.mnt)) + if (!mnt_may_suid(file->f_path.mnt)) return; if (task_no_new_privs(current)) return; - inode = bprm->file->f_path.dentry->d_inode; + inode = file->f_path.dentry->d_inode; mode = READ_ONCE(inode->i_mode); if (!(mode & (S_ISUID|S_ISGID))) return; @@ -1625,30 +1651,31 @@ static void bprm_fill_uid(struct linux_binprm *bprm) } /* + * Compute brpm->cred based upon the final binary. + */ +static int bprm_creds_from_file(struct linux_binprm *bprm) +{ + /* Compute creds based on which file? */ + struct file *file = bprm->execfd_creds ?
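Taken together, these exec.c changes fold flush_old_exec(), the credential commit that used to live in install_exec_creds(), and the dumpability/comm/signal fixups into a single begin_new_exec(), while setup_new_exec() keeps only the personality-dependent pieces and releases the exec mutexes. From a binary-format handler's point of view the sequence becomes roughly the following sketch (error handling and the actual mapping of the binary are elided; this is not a complete loader):

#include <linux/binfmts.h>

static int example_load_binary(struct linux_binprm *bprm)
{
	int retval;

	/* ... validate bprm->buf, read the headers ... */

	retval = begin_new_exec(bprm);	/* was flush_old_exec(); errors past this point are fatal */
	if (retval)
		return retval;

	setup_new_exec(bprm);		/* personality-dependent setup; also drops the exec mutexes */

	/* ... setup_arg_pages(), map the segments, build the stack ... */

	finalize_exec(bprm);
	/* ... start_thread(...) and return 0 ... */
	return 0;
}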
bprm->executable : bprm->file; + + bprm_fill_uid(bprm, file); + return security_bprm_creds_from_file(bprm, file); +} + +/* * Fill the binprm structure from the inode. - * Check permissions, then read the first BINPRM_BUF_SIZE bytes + * Read the first BINPRM_BUF_SIZE bytes * * This may be called multiple times for binary chains (scripts for example). */ -int prepare_binprm(struct linux_binprm *bprm) +static int prepare_binprm(struct linux_binprm *bprm) { - int retval; loff_t pos = 0; - bprm_fill_uid(bprm); - - /* fill in binprm security blob */ - retval = security_bprm_set_creds(bprm); - if (retval) - return retval; - bprm->called_set_creds = 1; - memset(bprm->buf, 0, BINPRM_BUF_SIZE); return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos); } -EXPORT_SYMBOL(prepare_binprm); - /* * Arguments are '\0' separated strings found at the location bprm->p * points to; chop off the first by relocating brpm->p to right after @@ -1694,15 +1721,15 @@ EXPORT_SYMBOL(remove_arg_zero); /* * cycle the list of binary formats handler, until one recognizes the image */ -int search_binary_handler(struct linux_binprm *bprm) +static int search_binary_handler(struct linux_binprm *bprm) { bool need_retry = IS_ENABLED(CONFIG_MODULES); struct linux_binfmt *fmt; int retval; - /* This allows 4 levels of binfmt rewrites before failing hard. */ - if (bprm->recursion_depth > 5) - return -ELOOP; + retval = prepare_binprm(bprm); + if (retval < 0) + return retval; retval = security_bprm_check(bprm); if (retval) @@ -1716,19 +1743,11 @@ int search_binary_handler(struct linux_binprm *bprm) continue; read_unlock(&binfmt_lock); - bprm->recursion_depth++; retval = fmt->load_binary(bprm); - bprm->recursion_depth--; read_lock(&binfmt_lock); put_binfmt(fmt); - if (retval < 0 && bprm->called_exec_mmap) { - /* we got to flush_old_exec() and failed after it */ - read_unlock(&binfmt_lock); - force_sigsegv(SIGSEGV); - return retval; - } - if (retval != -ENOEXEC || !bprm->file) { + if (bprm->point_of_no_return || (retval != -ENOEXEC)) { read_unlock(&binfmt_lock); return retval; } @@ -1747,12 +1766,11 @@ int search_binary_handler(struct linux_binprm *bprm) return retval; } -EXPORT_SYMBOL(search_binary_handler); static int exec_binprm(struct linux_binprm *bprm) { pid_t old_pid, old_vpid; - int ret; + int ret, depth; /* Need to fetch pid before load_binary changes it */ old_pid = current->pid; @@ -1760,15 +1778,38 @@ static int exec_binprm(struct linux_binprm *bprm) old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); rcu_read_unlock(); - ret = search_binary_handler(bprm); - if (ret >= 0) { - audit_bprm(bprm); - trace_sched_process_exec(current, old_pid, bprm); - ptrace_event(PTRACE_EVENT_EXEC, old_vpid); - proc_exec_connector(current); + /* This allows 4 levels of binfmt rewrites before failing hard. 
*/ + for (depth = 0;; depth++) { + struct file *exec; + if (depth > 5) + return -ELOOP; + + ret = search_binary_handler(bprm); + if (ret < 0) + return ret; + if (!bprm->interpreter) + break; + + exec = bprm->file; + bprm->file = bprm->interpreter; + bprm->interpreter = NULL; + + allow_write_access(exec); + if (unlikely(bprm->have_execfd)) { + if (bprm->executable) { + fput(exec); + return -ENOEXEC; + } + bprm->executable = exec; + } else + fput(exec); } - return ret; + audit_bprm(bprm); + trace_sched_process_exec(current, old_pid, bprm); + ptrace_event(PTRACE_EVENT_EXEC, old_vpid); + proc_exec_connector(current); + return 0; } /* @@ -1861,11 +1902,12 @@ static int __do_execve_file(int fd, struct filename *filename, if (retval < 0) goto out; - retval = prepare_binprm(bprm); - if (retval < 0) + /* Set the unchanging part of bprm->cred */ + retval = security_bprm_creds_for_exec(bprm); + if (retval) goto out; - retval = copy_strings_kernel(1, &bprm->filename, bprm); + retval = copy_string_kernel(bprm->filename, bprm); if (retval < 0) goto out; @@ -1897,6 +1939,14 @@ static int __do_execve_file(int fd, struct filename *filename, return retval; out: + /* + * If past the point of no return ensure the the code never + * returns to the userspace process. Use an existing fatal + * signal if present otherwise terminate the process with + * SIGSEGV. + */ + if (bprm->point_of_no_return && !fatal_signal_pending(current)) + force_sigsegv(SIGSEGV); if (bprm->mm) { acct_arg_size(bprm, 0); mmput(bprm->mm); diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 06887492f54b..785ead346543 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -372,10 +372,9 @@ static int exfat_readpage(struct file *file, struct page *page) return mpage_readpage(page, exfat_get_block); } -static int exfat_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned int nr_pages) +static void exfat_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, exfat_get_block); + mpage_readahead(rac, exfat_get_block); } static int exfat_writepage(struct page *page, struct writeback_control *wbc) @@ -502,7 +501,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations exfat_aops = { .readpage = exfat_readpage, - .readpages = exfat_readpages, + .readahead = exfat_readahead, .writepage = exfat_writepage, .writepages = exfat_writepages, .write_begin = exfat_write_begin, diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 39c4772e96c9..b4de9a0f170d 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -196,9 +196,7 @@ const struct file_operations ext2_file_operations = { }; const struct inode_operations ext2_file_inode_operations = { -#ifdef CONFIG_EXT2_FS_XATTR .listxattr = ext2_listxattr, -#endif .getattr = ext2_getattr, .setattr = ext2_setattr, .get_acl = ext2_get_acl, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index c885cf7d724b..c8b371c82b4f 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -36,6 +36,7 @@ #include <linux/iomap.h> #include <linux/namei.h> #include <linux/uio.h> +#include <linux/fiemap.h> #include "ext2.h" #include "acl.h" #include "xattr.h" @@ -877,11 +878,9 @@ static int ext2_readpage(struct file *file, struct page *page) return mpage_readpage(page, ext2_get_block); } -static int -ext2_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void ext2_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, 
pages, nr_pages, ext2_get_block); + mpage_readahead(rac, ext2_get_block); } static int @@ -967,7 +966,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc const struct address_space_operations ext2_aops = { .readpage = ext2_readpage, - .readpages = ext2_readpages, + .readahead = ext2_readahead, .writepage = ext2_writepage, .write_begin = ext2_write_begin, .write_end = ext2_write_end, @@ -981,7 +980,7 @@ const struct address_space_operations ext2_aops = { const struct address_space_operations ext2_nobh_aops = { .readpage = ext2_readpage, - .readpages = ext2_readpages, + .readahead = ext2_readahead, .writepage = ext2_nobh_writepage, .write_begin = ext2_nobh_write_begin, .write_end = nobh_write_end, diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ccfbbf59e2fc..ba3e3e075891 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -136,9 +136,7 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); -#ifdef CONFIG_EXT2_FS_XATTR inode->i_op = &ext2_special_inode_operations; -#endif mark_inode_dirty(inode); err = ext2_add_nondir(dentry, inode); } @@ -413,9 +411,7 @@ const struct inode_operations ext2_dir_inode_operations = { .rmdir = ext2_rmdir, .mknod = ext2_mknod, .rename = ext2_rename, -#ifdef CONFIG_EXT2_FS_XATTR .listxattr = ext2_listxattr, -#endif .getattr = ext2_getattr, .setattr = ext2_setattr, .get_acl = ext2_get_acl, @@ -424,9 +420,7 @@ const struct inode_operations ext2_dir_inode_operations = { }; const struct inode_operations ext2_special_inode_operations = { -#ifdef CONFIG_EXT2_FS_XATTR .listxattr = ext2_listxattr, -#endif .getattr = ext2_getattr, .setattr = ext2_setattr, .get_acl = ext2_get_acl, diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c index 00cdb8679486..948d3a441403 100644 --- a/fs/ext2/symlink.c +++ b/fs/ext2/symlink.c @@ -25,16 +25,12 @@ const struct inode_operations ext2_symlink_inode_operations = { .get_link = page_get_link, .getattr = ext2_getattr, .setattr = ext2_setattr, -#ifdef CONFIG_EXT2_FS_XATTR .listxattr = ext2_listxattr, -#endif }; const struct inode_operations ext2_fast_symlink_inode_operations = { .get_link = simple_get_link, .getattr = ext2_getattr, .setattr = ext2_setattr, -#ifdef CONFIG_EXT2_FS_XATTR .listxattr = ext2_listxattr, -#endif }; diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h index 16272e6ddcf4..7925f596e8e2 100644 --- a/fs/ext2/xattr.h +++ b/fs/ext2/xattr.h @@ -100,6 +100,7 @@ static inline void ext2_xattr_destroy_cache(struct mb_cache *cache) } #define ext2_xattr_handlers NULL +#define ext2_listxattr NULL # endif /* CONFIG_EXT2_FS_XATTR */ diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 2a592e38cdfe..cf9e430514c4 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -99,8 +99,7 @@ config EXT4_DEBUG Enables run-time debugging support for the ext4 filesystem. If you select Y here, then you will be able to turn on debugging - with a command such as: - echo 1 > /sys/module/ext4/parameters/mballoc_debug + using dynamic debug control for mb_debug() / ext_debug() msgs. 
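For block-mapped filesystems such as exfat and ext2 above, the readahead conversion is mechanical: mpage_readpages(mapping, pages, nr_pages, get_block) becomes mpage_readahead(rac, get_block) and the aops entry switches from .readpages to .readahead. Sketched for a hypothetical filesystem (the get_block routine is assumed to exist elsewhere):

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mpage.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);	/* defined elsewhere */

static void myfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	/* .readpage, .writepage, ... omitted */
	.readahead	= myfs_readahead,
};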
config EXT4_KUNIT_TESTS tristate "KUnit tests for ext4" diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 8c7bbf3e566d..76f634d185f1 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -215,9 +215,8 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type, value, size, xattr_flags); kfree(value); - if (!error) { + if (!error) set_cached_acl(inode, type, acl); - } return error; } @@ -256,7 +255,7 @@ retry: if (!error && update_mode) { inode->i_mode = mode; inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + error = ext4_mark_inode_dirty(handle, inode); } out_stop: ext4_journal_stop(handle); diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index a32e5f7b5385..1ba46d87cdf1 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -903,10 +903,11 @@ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode) return bg_start; if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) - colour = (current->pid % 16) * + colour = (task_pid_nr(current) % 16) * (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); else - colour = (current->pid % 16) * ((last_block - bg_start) / 16); + colour = (task_pid_nr(current) % 16) * + ((last_block - bg_start) / 16); return bg_start + colour; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ad2dbf6e4924..b08841f70b69 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -36,6 +36,7 @@ #include <crypto/hash.h> #include <linux/falloc.h> #include <linux/percpu-rwsem.h> +#include <linux/fiemap.h> #ifdef __KERNEL__ #include <linux/compat.h> #endif @@ -80,14 +81,22 @@ #define ext4_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif + /* + * Turn on EXT_DEBUG to enable ext4_ext_show_path/leaf/move in extents.c + */ +#define EXT_DEBUG__ + /* - * Turn on EXT_DEBUG to get lots of info about extents operations. + * Dynamic printk for controlled extents debugging. */ -#define EXT_DEBUG__ -#ifdef EXT_DEBUG -#define ext_debug(fmt, ...) printk(fmt, ##__VA_ARGS__) +#ifdef CONFIG_EXT4_DEBUG +#define ext_debug(ino, fmt, ...) \ + pr_debug("[%s/%d] EXT4-fs (%s): ino %lu: (%s, %d): %s:" fmt, \ + current->comm, task_pid_nr(current), \ + ino->i_sb->s_id, ino->i_ino, __FILE__, __LINE__, \ + __func__, ##__VA_ARGS__) #else -#define ext_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__) +#define ext_debug(ino, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif /* data type for block offset of block group */ @@ -142,6 +151,8 @@ enum SHIFT_DIRECTION { #define EXT4_MB_USE_ROOT_BLOCKS 0x1000 /* Use blocks from reserved pool */ #define EXT4_MB_USE_RESERVED 0x2000 +/* Do strict check for free blocks while retrying block allocation */ +#define EXT4_MB_STRICT_CHECK 0x4000 struct ext4_allocation_request { /* target inode for block we're allocating */ @@ -171,10 +182,10 @@ struct ext4_allocation_request { * well as to store the information returned by ext4_map_blocks(). It * takes less room on the stack than a struct buffer_head. */ -#define EXT4_MAP_NEW (1 << BH_New) -#define EXT4_MAP_MAPPED (1 << BH_Mapped) -#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten) -#define EXT4_MAP_BOUNDARY (1 << BH_Boundary) +#define EXT4_MAP_NEW BIT(BH_New) +#define EXT4_MAP_MAPPED BIT(BH_Mapped) +#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten) +#define EXT4_MAP_BOUNDARY BIT(BH_Boundary) #define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY) @@ -417,7 +428,7 @@ struct flex_groups { /* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */ #define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. 
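The reworked ext_debug() above routes extent debugging through pr_debug(), so with CONFIG_EXT4_DEBUG the messages are compiled in but only printed once switched on through dynamic debug, and every message is now tagged with the inode it concerns. A hedged usage sketch (the variable names are illustrative):

	/* call site: the first argument is the inode the message is about */
	ext_debug(inode, "mapped %u blocks at lblk %u\n", allocated, lblk);

	/*
	 * Enabled at run time through dynamic debug, for example:
	 *   echo 'file extents.c +p' > /sys/kernel/debug/dynamic_debug/control
	 */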
*/ #define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ -#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */ +#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ #define EXT4_FL_USER_VISIBLE 0x705BDFFF /* User visible flags */ @@ -490,6 +501,7 @@ enum { /* 22 was formerly EXT4_INODE_EOFBLOCKS */ EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */ EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */ + EXT4_INODE_CASEFOLD = 30, /* Casefolded directory */ EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */ }; @@ -535,6 +547,7 @@ static inline void ext4_check_flag_values(void) CHECK_FLAG_VALUE(EA_INODE); CHECK_FLAG_VALUE(INLINE_DATA); CHECK_FLAG_VALUE(PROJINHERIT); + CHECK_FLAG_VALUE(CASEFOLD); CHECK_FLAG_VALUE(RESERVED); } @@ -609,8 +622,6 @@ enum { #define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020 /* Don't normalize allocation size (used for fallocate) */ #define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040 - /* Request will not result in inode size update (user for fallocate) */ -#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080 /* Convert written extents to unwritten */ #define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0100 /* Write zeros to newly created written extents */ @@ -632,6 +643,7 @@ enum { */ #define EXT4_EX_NOCACHE 0x40000000 #define EXT4_EX_FORCE_CACHE 0x20000000 +#define EXT4_EX_NOFAIL 0x10000000 /* * Flags used by ext4_free_blocks @@ -1357,11 +1369,9 @@ struct ext4_super_block { */ #define EXT4_MF_MNTDIR_SAMPLED 0x0001 #define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ -#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004 #ifdef CONFIG_FS_ENCRYPTION -#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \ - EXT4_MF_TEST_DUMMY_ENCRYPTION)) +#define DUMMY_ENCRYPTION_ENABLED(sbi) ((sbi)->s_dummy_enc_ctx.ctx != NULL) #else #define DUMMY_ENCRYPTION_ENABLED(sbi) (0) #endif @@ -1551,6 +1561,9 @@ struct ext4_sb_info { struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; + /* Encryption context for '-o test_dummy_encryption' */ + struct fscrypt_dummy_context s_dummy_enc_ctx; + /* * Barrier between writepages ops and changing any inode's JOURNAL_DATA * or EXTENTS flag. 
@@ -2050,7 +2063,7 @@ struct ext4_dir_entry_2 { __le32 inode; /* Inode number */ __le16 rec_len; /* Directory entry length */ __u8 name_len; /* Name length */ - __u8 file_type; + __u8 file_type; /* See file type macros EXT4_FT_* below */ char name[EXT4_NAME_LEN]; /* File name */ }; @@ -3316,9 +3329,8 @@ static inline void ext4_set_de_type(struct super_block *sb, } /* readpages.c */ -extern int ext4_mpage_readpages(struct address_space *mapping, - struct list_head *pages, struct page *page, - unsigned nr_pages, bool is_readahead); +extern int ext4_mpage_readpages(struct inode *inode, + struct readahead_control *rac, struct page *page); extern int __init ext4_init_post_read_processing(void); extern void ext4_exit_post_read_processing(void); @@ -3354,7 +3366,7 @@ struct ext4_extent; */ #define EXT_MAX_BLOCKS 0xffffffff -extern int ext4_ext_tree_init(handle_t *handle, struct inode *); +extern void ext4_ext_tree_init(handle_t *handle, struct inode *inode); extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents); extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 1c216fcc202a..44e59881a1f0 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -170,10 +170,13 @@ struct partial_cluster { (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) #define EXT_LAST_INDEX(__hdr__) \ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) -#define EXT_MAX_EXTENT(__hdr__) \ - (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) +#define EXT_MAX_EXTENT(__hdr__) \ + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ + : 0) #define EXT_MAX_INDEX(__hdr__) \ - (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) + ((le16_to_cpu((__hdr__)->eh_max)) ? 
\ + ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) { diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 4b9002f0e84c..00dc668e052b 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -222,7 +222,10 @@ ext4_mark_iloc_dirty(handle_t *handle, int ext4_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); -int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode); +#define ext4_mark_inode_dirty(__h, __i) \ + __ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__) +int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode, + const char *func, unsigned int line); int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, @@ -335,12 +338,6 @@ static inline handle_t *__ext4_journal_start(struct inode *inode, handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, int type); -static inline void ext4_journal_free_reserved(handle_t *handle) -{ - if (ext4_handle_valid(handle)) - jbd2_journal_free_reserved(handle); -} - static inline handle_t *ext4_journal_current_handle(void) { return journal_current_handle(); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 2b4b94542e34..7d088ff1e902 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -297,11 +297,14 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode, { struct ext4_ext_path *path = *ppath; int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext); + int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO; + + if (nofail) + flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL; return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ? EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0, - EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | - (nofail ? 
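The ext4_mark_inode_dirty() wrapper above exists so that __ext4_mark_inode_dirty() receives the caller's __func__ and __LINE__, letting error messages point at the call site rather than at the helper. The same pattern, reduced to a hypothetical helper:

/* the out-of-line function takes the call-site information as extra arguments */
int __example_mark_dirty(void *handle, struct inode *inode,
			 const char *func, unsigned int line);

/* callers keep the old two-argument form; the macro fills in the location */
#define example_mark_dirty(handle, inode) \
	__example_mark_dirty((handle), (inode), __func__, __LINE__)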
EXT4_GET_BLOCKS_METADATA_NOFAIL:0)); + flags); } static int @@ -487,8 +490,12 @@ __read_extent_tree_block(const char *function, unsigned int line, { struct buffer_head *bh; int err; + gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS; + + if (flags & EXT4_EX_NOFAIL) + gfp_flags |= __GFP_NOFAIL; - bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS); + bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); @@ -600,22 +607,22 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) { int k, l = path->p_depth; - ext_debug("path:"); + ext_debug(inode, "path:"); for (k = 0; k <= l; k++, path++) { if (path->p_idx) { - ext_debug(" %d->%llu", + ext_debug(inode, " %d->%llu", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); } else if (path->p_ext) { - ext_debug(" %d:[%d]%d:%llu ", + ext_debug(inode, " %d:[%d]%d:%llu ", le32_to_cpu(path->p_ext->ee_block), ext4_ext_is_unwritten(path->p_ext), ext4_ext_get_actual_len(path->p_ext), ext4_ext_pblock(path->p_ext)); } else - ext_debug(" []"); + ext_debug(inode, " []"); } - ext_debug("\n"); + ext_debug(inode, "\n"); } static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) @@ -631,14 +638,14 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) eh = path[depth].p_hdr; ex = EXT_FIRST_EXTENT(eh); - ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); + ext_debug(inode, "Displaying leaf extents\n"); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { - ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), + ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), ext4_ext_is_unwritten(ex), ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); } - ext_debug("\n"); + ext_debug(inode, "\n"); } static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, @@ -651,10 +658,9 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent_idx *idx; idx = path[level].p_idx; while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { - ext_debug("%d: move %d:%llu in new index %llu\n", level, - le32_to_cpu(idx->ei_block), - ext4_idx_pblock(idx), - newblock); + ext_debug(inode, "%d: move %d:%llu in new index %llu\n", + level, le32_to_cpu(idx->ei_block), + ext4_idx_pblock(idx), newblock); idx++; } @@ -663,7 +669,7 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, ex = path[depth].p_ext; while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { - ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", + ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n", le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), ext4_ext_is_unwritten(ex), @@ -707,7 +713,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_extent_idx *r, *l, *m; - ext_debug("binsearch for %u(idx): ", block); + ext_debug(inode, "binsearch for %u(idx): ", block); l = EXT_FIRST_INDEX(eh) + 1; r = EXT_LAST_INDEX(eh); @@ -717,13 +723,13 @@ ext4_ext_binsearch_idx(struct inode *inode, r = m - 1; else l = m + 1; - ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), - m, le32_to_cpu(m->ei_block), - r, le32_to_cpu(r->ei_block)); + ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, + le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block), + r, le32_to_cpu(r->ei_block)); } path->p_idx = l - 1; - ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), + ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); #ifdef CHECK_BINSEARCH @@ 
-774,7 +780,7 @@ ext4_ext_binsearch(struct inode *inode, return; } - ext_debug("binsearch for %u: ", block); + ext_debug(inode, "binsearch for %u: ", block); l = EXT_FIRST_EXTENT(eh) + 1; r = EXT_LAST_EXTENT(eh); @@ -785,13 +791,13 @@ ext4_ext_binsearch(struct inode *inode, r = m - 1; else l = m + 1; - ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), - m, le32_to_cpu(m->ee_block), - r, le32_to_cpu(r->ee_block)); + ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, + le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block), + r, le32_to_cpu(r->ee_block)); } path->p_ext = l - 1; - ext_debug(" -> %d:%llu:[%d]%d ", + ext_debug(inode, " -> %d:%llu:[%d]%d ", le32_to_cpu(path->p_ext->ee_block), ext4_ext_pblock(path->p_ext), ext4_ext_is_unwritten(path->p_ext), @@ -816,7 +822,7 @@ ext4_ext_binsearch(struct inode *inode, } -int ext4_ext_tree_init(handle_t *handle, struct inode *inode) +void ext4_ext_tree_init(handle_t *handle, struct inode *inode) { struct ext4_extent_header *eh; @@ -826,7 +832,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode) eh->eh_magic = EXT4_EXT_MAGIC; eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); ext4_mark_inode_dirty(handle, inode); - return 0; } struct ext4_ext_path * @@ -838,6 +843,10 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, struct ext4_ext_path *path = orig_path ? *orig_path : NULL; short int depth, i, ppos = 0; int ret; + gfp_t gfp_flags = GFP_NOFS; + + if (flags & EXT4_EX_NOFAIL) + gfp_flags |= __GFP_NOFAIL; eh = ext_inode_hdr(inode); depth = ext_depth(inode); @@ -858,7 +867,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, if (!path) { /* account possible depth increase */ path = kcalloc(depth + 2, sizeof(struct ext4_ext_path), - GFP_NOFS); + gfp_flags); if (unlikely(!path)) return ERR_PTR(-ENOMEM); path[0].p_maxdepth = depth + 1; @@ -871,7 +880,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, ext4_cache_extents(inode, eh); /* walk through the tree */ while (i) { - ext_debug("depth %d: num %d, max %d\n", + ext_debug(inode, "depth %d: num %d, max %d\n", ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); ext4_ext_binsearch_idx(inode, path + ppos, block); @@ -948,18 +957,20 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, if (logical > le32_to_cpu(curp->p_idx->ei_block)) { /* insert after */ - ext_debug("insert new index %d after: %llu\n", logical, ptr); + ext_debug(inode, "insert new index %d after: %llu\n", + logical, ptr); ix = curp->p_idx + 1; } else { /* insert before */ - ext_debug("insert new index %d before: %llu\n", logical, ptr); + ext_debug(inode, "insert new index %d before: %llu\n", + logical, ptr); ix = curp->p_idx; } len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; BUG_ON(len < 0); if (len > 0) { - ext_debug("insert new index %d: " + ext_debug(inode, "insert new index %d: " "move %d indices from 0x%p to 0x%p\n", logical, len, ix, ix + 1); memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); @@ -1008,9 +1019,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, ext4_fsblk_t newblock, oldblock; __le32 border; ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ + gfp_t gfp_flags = GFP_NOFS; int err = 0; size_t ext_size = 0; + if (flags & EXT4_EX_NOFAIL) + gfp_flags |= __GFP_NOFAIL; + /* make decision: where to split? 
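Several of the extent-tree hunks above and below follow the same pattern for EXT4_EX_NOFAIL: when the caller says the operation must not fail, the flag is translated into __GFP_NOFAIL so the allocator retries until it succeeds instead of returning NULL. A small illustrative helper (the names are hypothetical; the pattern matches the kcalloc() calls in this patch):

#include <linux/slab.h>

static void *example_nofail_alloc(size_t nr, size_t size, bool nofail)
{
	gfp_t gfp = GFP_NOFS;

	if (nofail)
		gfp |= __GFP_NOFAIL;	/* allocation will retry rather than fail */
	return kcalloc(nr, size, gfp);
}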
*/ /* FIXME: now decision is simplest: at current extent */ @@ -1022,12 +1037,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { border = path[depth].p_ext[1].ee_block; - ext_debug("leaf will be split." + ext_debug(inode, "leaf will be split." " next leaf starts at %d\n", le32_to_cpu(border)); } else { border = newext->ee_block; - ext_debug("leaf will be added." + ext_debug(inode, "leaf will be added." " next leaf starts at %d\n", le32_to_cpu(border)); } @@ -1044,12 +1059,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, * We need this to handle errors and free blocks * upon them. */ - ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS); + ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags); if (!ablocks) return -ENOMEM; /* allocate all needed blocks */ - ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); + ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at); for (a = 0; a < depth - at; a++) { newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err, flags); @@ -1135,7 +1150,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, goto cleanup; } if (k) - ext_debug("create %d intermediate indices\n", k); + ext_debug(inode, "create %d intermediate indices\n", k); /* insert new index into current index block */ /* current depth stored in i var */ i = depth - 1; @@ -1162,7 +1177,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, fidx->ei_block = border; ext4_idx_store_pblock(fidx, oldblock); - ext_debug("int.index at %d (block %llu): %u -> %llu\n", + ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n", i, newblock, le32_to_cpu(border), oldblock); /* move remainder of path[i] to the new index block */ @@ -1176,7 +1191,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } /* start copy indexes */ m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; - ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, + ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx, EXT_MAX_INDEX(path[i].p_hdr)); ext4_ext_show_move(inode, path, newblock, i); if (m) { @@ -1313,13 +1328,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, EXT_FIRST_INDEX(neh)->ei_block = EXT_FIRST_EXTENT(neh)->ee_block; } - ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", + ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n", le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), ext4_idx_pblock(EXT_FIRST_INDEX(neh))); le16_add_cpu(&neh->eh_depth, 1); - ext4_mark_inode_dirty(handle, inode); + err = ext4_mark_inode_dirty(handle, inode); out: brelse(bh); @@ -1955,7 +1970,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, /* Try to append newex to the ex */ if (ext4_can_extents_be_merged(inode, ex, newext)) { - ext_debug("append [%d]%d block to %u:[%d]%d" + ext_debug(inode, "append [%d]%d block to %u:[%d]%d" "(from %llu)\n", ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext), @@ -1980,7 +1995,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, prepend: /* Try to prepend newex to the ex */ if (ext4_can_extents_be_merged(inode, newext, ex)) { - ext_debug("prepend %u[%d]%d block to %u:[%d]%d" + ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" "(from %llu)\n", le32_to_cpu(newext->ee_block), ext4_ext_is_unwritten(newext), @@ -2018,20 +2033,20 @@ prepend: if (le32_to_cpu(newext->ee_block) 
> le32_to_cpu(fex->ee_block)) next = ext4_ext_next_leaf_block(path); if (next != EXT_MAX_BLOCKS) { - ext_debug("next leaf block - %u\n", next); + ext_debug(inode, "next leaf block - %u\n", next); BUG_ON(npath != NULL); - npath = ext4_find_extent(inode, next, NULL, 0); + npath = ext4_find_extent(inode, next, NULL, gb_flags); if (IS_ERR(npath)) return PTR_ERR(npath); BUG_ON(npath->p_depth != path->p_depth); eh = npath[depth].p_hdr; if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { - ext_debug("next leaf isn't full(%d)\n", + ext_debug(inode, "next leaf isn't full(%d)\n", le16_to_cpu(eh->eh_entries)); path = npath; goto has_space; } - ext_debug("next leaf has no free space(%d,%d)\n", + ext_debug(inode, "next leaf has no free space(%d,%d)\n", le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); } @@ -2057,7 +2072,7 @@ has_space: if (!nearex) { /* there is no extent in this leaf, create first one */ - ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", + ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_unwritten(newext), @@ -2067,7 +2082,7 @@ has_space: if (le32_to_cpu(newext->ee_block) > le32_to_cpu(nearex->ee_block)) { /* Insert after */ - ext_debug("insert %u:%llu:[%d]%d before: " + ext_debug(inode, "insert %u:%llu:[%d]%d before: " "nearest %p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), @@ -2078,7 +2093,7 @@ has_space: } else { /* Insert before */ BUG_ON(newext->ee_block == nearex->ee_block); - ext_debug("insert %u:%llu:[%d]%d after: " + ext_debug(inode, "insert %u:%llu:[%d]%d after: " "nearest %p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), @@ -2088,7 +2103,7 @@ has_space: } len = EXT_LAST_EXTENT(eh) - nearex + 1; if (len > 0) { - ext_debug("insert %u:%llu:[%d]%d: " + ext_debug(inode, "insert %u:%llu:[%d]%d: " "move %d extents from 0x%p to 0x%p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), @@ -2232,7 +2247,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, return; hole_len = min(es.es_lblk - hole_start, hole_len); } - ext_debug(" -> %u:%u\n", hole_start, hole_len); + ext_debug(inode, " -> %u:%u\n", hole_start, hole_len); ext4_es_insert_extent(inode, hole_start, hole_len, ~0, EXTENT_STATUS_HOLE); } @@ -2269,7 +2284,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, err = ext4_ext_dirty(handle, inode, path); if (err) return err; - ext_debug("index is empty, remove it, free block %llu\n", leaf); + ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); trace_ext4_ext_rm_idx(inode, leaf); ext4_free_blocks(handle, inode, NULL, leaf, 1, @@ -2548,7 +2563,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, ext4_fsblk_t pblk; /* the header must be checked already in ext4_ext_remove_space() */ - ext_debug("truncate since %u in leaf to %u\n", start, end); + ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); if (!path[depth].p_hdr) path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); eh = path[depth].p_hdr; @@ -2574,7 +2589,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, else unwritten = 0; - ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, + ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, unwritten, ex_ee_len); path[depth].p_ext = ex; @@ -2582,7 +2597,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, b = ex_ee_block+ex_ee_len - 1 < end ? 
ex_ee_block+ex_ee_len - 1 : end; - ext_debug(" border %u:%u\n", a, b); + ext_debug(inode, " border %u:%u\n", a, b); /* If this extent is beyond the end of the hole, skip it */ if (end < ex_ee_block) { @@ -2691,7 +2706,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, if (err) goto out; - ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, + ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, ext4_ext_pblock(ex)); ex--; ex_ee_block = le32_to_cpu(ex->ee_block); @@ -2768,7 +2783,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, partial.lblk = 0; partial.state = initial; - ext_debug("truncate since %u to %u\n", start, end); + ext_debug(inode, "truncate since %u to %u\n", start, end); /* probably first extent we're gonna free will be last in block */ handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, @@ -2793,7 +2808,8 @@ again: ext4_fsblk_t pblk; /* find extent for or closest extent to this block */ - path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); + path = ext4_find_extent(inode, end, NULL, + EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); if (IS_ERR(path)) { ext4_journal_stop(handle); return PTR_ERR(path); @@ -2879,7 +2895,7 @@ again: le16_to_cpu(path[k].p_hdr->eh_entries)+1; } else { path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), - GFP_NOFS); + GFP_NOFS | __GFP_NOFAIL); if (path == NULL) { ext4_journal_stop(handle); return -ENOMEM; @@ -2909,7 +2925,7 @@ again: /* this is index block */ if (!path[i].p_hdr) { - ext_debug("initialize header\n"); + ext_debug(inode, "initialize header\n"); path[i].p_hdr = ext_block_hdr(path[i].p_bh); } @@ -2917,7 +2933,7 @@ again: /* this level hasn't been touched yet */ path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; - ext_debug("init index ptr: hdr 0x%p, num %d\n", + ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", path[i].p_hdr, le16_to_cpu(path[i].p_hdr->eh_entries)); } else { @@ -2925,13 +2941,13 @@ again: path[i].p_idx--; } - ext_debug("level %d - index, first 0x%p, cur 0x%p\n", + ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", i, EXT_FIRST_INDEX(path[i].p_hdr), path[i].p_idx); if (ext4_ext_more_to_rm(path + i)) { struct buffer_head *bh; /* go to the next level */ - ext_debug("move to level %d (block %llu)\n", + ext_debug(inode, "move to level %d (block %llu)\n", i + 1, ext4_idx_pblock(path[i].p_idx)); memset(path + i + 1, 0, sizeof(*path)); bh = read_extent_tree_block(inode, @@ -2967,7 +2983,7 @@ again: brelse(path[i].p_bh); path[i].p_bh = NULL; i--; - ext_debug("return to level %d\n", i); + ext_debug(inode, "return to level %d\n", i); } } @@ -3135,8 +3151,7 @@ static int ext4_split_extent_at(handle_t *handle, BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); - ext_debug("ext4_split_extents_at: inode %lu, logical" - "block %llu\n", inode->i_ino, (unsigned long long)split); + ext_debug(inode, "logical block %llu\n", (unsigned long long)split); ext4_ext_show_leaf(inode, path); @@ -3244,6 +3259,10 @@ out: fix_extent_len: ex->ee_len = orig_ex.ee_len; + /* + * Ignore ext4_ext_dirty return value since we are already in error path + * and err is a non-zero error code. + */ ext4_ext_dirty(handle, inode, path + path->p_depth); return err; } @@ -3300,7 +3319,7 @@ static int ext4_split_extent(handle_t *handle, * Update path is required because previous ext4_split_extent_at() may * result in split of original leaf or extent zeroout. 
*/ - path = ext4_find_extent(inode, map->m_lblk, ppath, 0); + path = ext4_find_extent(inode, map->m_lblk, ppath, flags); if (IS_ERR(path)) return PTR_ERR(path); depth = ext_depth(inode); @@ -3369,9 +3388,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, int err = 0; int split_flag = EXT4_EXT_DATA_VALID2; - ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" - "block %llu, max_blocks %u\n", inode->i_ino, - (unsigned long long)map->m_lblk, map_len); + ext_debug(inode, "logical block %llu, max_blocks %u\n", + (unsigned long long)map->m_lblk, map_len); sbi = EXT4_SB(inode->i_sb); eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) @@ -3503,7 +3521,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, } if (allocated) { /* Mark the block containing both extents as dirty */ - ext4_ext_dirty(handle, inode, path + depth); + err = ext4_ext_dirty(handle, inode, path + depth); /* Update path to point to the right extent */ path[depth].p_ext = abut_ex; @@ -3623,8 +3641,7 @@ static int ext4_split_convert_extents(handle_t *handle, unsigned int ee_len; int split_flag = 0, depth; - ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n", - __func__, inode->i_ino, + ext_debug(inode, "logical block %llu, max_blocks %u\n", (unsigned long long)map->m_lblk, map->m_len); eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) @@ -3670,8 +3687,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); - ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" - "block %llu, max_blocks %u\n", inode->i_ino, + ext_debug(inode, "logical block %llu, max_blocks %u\n", (unsigned long long)ee_block, ee_len); /* If extent is larger than requested it is a clear sign that we still @@ -3741,8 +3757,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode, ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); - ext_debug("%s: inode %lu, logical" - "block %llu, max_blocks %u\n", __func__, inode->i_ino, + ext_debug(inode, "logical block %llu, max_blocks %u\n", (unsigned long long)ee_block, ee_len); if (ee_block != map->m_lblk || ee_len > map->m_len) { @@ -3794,16 +3809,13 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, int flags, unsigned int allocated, ext4_fsblk_t newblock) { -#ifdef EXT_DEBUG - struct ext4_ext_path *path = *ppath; -#endif + struct ext4_ext_path __maybe_unused *path = *ppath; int ret = 0; int err = 0; - ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical " - "block %llu, max_blocks %u, flags %x, allocated %u\n", - inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, - flags, allocated); + ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", + (unsigned long long)map->m_lblk, map->m_len, flags, + allocated); ext4_ext_show_leaf(inode, path); /* @@ -3815,39 +3827,38 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, trace_ext4_ext_handle_unwritten_extents(inode, map, flags, allocated, newblock); - /* get_block() before submit the IO, split the extent */ + /* get_block() before submitting IO, split the extent */ if (flags & EXT4_GET_BLOCKS_PRE_IO) { ret = ext4_split_convert_extents(handle, inode, map, ppath, flags | EXT4_GET_BLOCKS_CONVERT); - if (ret <= 0) - goto out; + if (ret < 0) { + err = ret; + goto out2; + } + /* + * shouldn't get a 0 return when splitting an 
extent unless + * m_len is 0 (bug) or extent has been corrupted + */ + if (unlikely(ret == 0)) { + EXT4_ERROR_INODE(inode, + "unexpected ret == 0, m_len = %u", + map->m_len); + err = -EFSCORRUPTED; + goto out2; + } map->m_flags |= EXT4_MAP_UNWRITTEN; goto out; } /* IO end_io complete, convert the filled extent to written */ if (flags & EXT4_GET_BLOCKS_CONVERT) { - if (flags & EXT4_GET_BLOCKS_ZERO) { - if (allocated > map->m_len) - allocated = map->m_len; - err = ext4_issue_zeroout(inode, map->m_lblk, newblock, - allocated); - if (err < 0) - goto out2; - } - ret = ext4_convert_unwritten_extents_endio(handle, inode, map, + err = ext4_convert_unwritten_extents_endio(handle, inode, map, ppath); - if (ret >= 0) - ext4_update_inode_fsync_trans(handle, inode, 1); - else - err = ret; - map->m_flags |= EXT4_MAP_MAPPED; - map->m_pblk = newblock; - if (allocated > map->m_len) - allocated = map->m_len; - map->m_len = allocated; - goto out2; + if (err < 0) + goto out2; + ext4_update_inode_fsync_trans(handle, inode, 1); + goto map_out; } - /* buffered IO case */ + /* buffered IO cases */ /* * repeat fallocate creation request * we already have an unwritten extent @@ -3870,29 +3881,39 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, goto out1; } - /* buffered write, writepage time, convert*/ + /* + * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. + * For buffered writes, at writepage time, etc. Convert a + * discovered unwritten extent to written. + */ ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); - if (ret >= 0) - ext4_update_inode_fsync_trans(handle, inode, 1); -out: - if (ret <= 0) { + if (ret < 0) { err = ret; goto out2; - } else - allocated = ret; - map->m_flags |= EXT4_MAP_NEW; - if (allocated > map->m_len) - allocated = map->m_len; - map->m_len = allocated; + } + ext4_update_inode_fsync_trans(handle, inode, 1); + /* + * shouldn't get a 0 return when converting an unwritten extent + * unless m_len is 0 (bug) or extent has been corrupted + */ + if (unlikely(ret == 0)) { + EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", + map->m_len); + err = -EFSCORRUPTED; + goto out2; + } +out: + allocated = ret; + map->m_flags |= EXT4_MAP_NEW; map_out: map->m_flags |= EXT4_MAP_MAPPED; out1: + map->m_pblk = newblock; if (allocated > map->m_len) allocated = map->m_len; - ext4_ext_show_leaf(inode, path); - map->m_pblk = newblock; map->m_len = allocated; + ext4_ext_show_leaf(inode, path); out2: return err ? 
err : allocated; } @@ -4024,15 +4045,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_ext_path *path = NULL; struct ext4_extent newex, *ex, *ex2; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - ext4_fsblk_t newblock = 0; + ext4_fsblk_t newblock = 0, pblk; int err = 0, depth, ret; unsigned int allocated = 0, offset = 0; unsigned int allocated_clusters = 0; struct ext4_allocation_request ar; ext4_lblk_t cluster_offset; - ext_debug("blocks %u/%u requested for inode %lu\n", - map->m_lblk, map->m_len, inode->i_ino); + ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); /* find extent for this block */ @@ -4040,7 +4060,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; - goto out2; + goto out; } depth = ext_depth(inode); @@ -4056,7 +4076,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, (unsigned long) map->m_lblk, depth, path[depth].p_block); err = -EFSCORRUPTED; - goto out2; + goto out; } ex = path[depth].p_ext; @@ -4079,8 +4099,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, newblock = map->m_lblk - ee_block + ee_start; /* number of remaining blocks in the extent */ allocated = ee_len - (map->m_lblk - ee_block); - ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, - ee_block, ee_len, newblock); + ext_debug(inode, "%u fit into %u:%d -> %llu\n", + map->m_lblk, ee_block, ee_len, newblock); /* * If the extent is initialized check whether the @@ -4090,8 +4110,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { err = convert_initialized_extent(handle, inode, map, &path, &allocated); - goto out2; + goto out; } else if (!ext4_ext_is_unwritten(ex)) { + map->m_flags |= EXT4_MAP_MAPPED; + map->m_pblk = newblock; + if (allocated > map->m_len) + allocated = map->m_len; + map->m_len = allocated; + ext4_ext_show_leaf(inode, path); goto out; } @@ -4102,7 +4128,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, err = ret; else allocated = ret; - goto out2; + goto out; } } @@ -4127,7 +4153,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, map->m_pblk = 0; map->m_len = min_t(unsigned int, map->m_len, hole_len); - goto out2; + goto out; } /* @@ -4151,12 +4177,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ar.lleft = map->m_lblk; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) - goto out2; + goto out; ar.lright = map->m_lblk; ex2 = NULL; err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); if (err) - goto out2; + goto out; /* Check if the extent after searching to the right implies a * cluster we can use. 
*/ @@ -4217,17 +4243,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ar.flags |= EXT4_MB_USE_RESERVED; newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) - goto out2; - ext_debug("allocate new block: goal %llu, found %llu/%u\n", - ar.goal, newblock, allocated); + goto out; allocated_clusters = ar.len; ar.len = EXT4_C2B(sbi, ar.len) - offset; + ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", + ar.goal, newblock, ar.len, allocated); if (ar.len > allocated) ar.len = allocated; got_allocated_blocks: /* try to insert new extent into found leaf and return */ - ext4_ext_store_pblock(&newex, newblock + offset); + pblk = newblock + offset; + ext4_ext_store_pblock(&newex, pblk); newex.ee_len = cpu_to_le16(ar.len); /* Mark unwritten */ if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { @@ -4252,16 +4279,9 @@ got_allocated_blocks: EXT4_C2B(sbi, allocated_clusters), fb_flags); } - goto out2; + goto out; } - /* previous routine could use block we allocated */ - newblock = ext4_ext_pblock(&newex); - allocated = ext4_ext_get_actual_len(&newex); - if (allocated > map->m_len) - allocated = map->m_len; - map->m_flags |= EXT4_MAP_NEW; - /* * Reduce the reserved cluster count to reflect successful deferred * allocation of delayed allocated clusters or direct allocation of @@ -4307,14 +4327,14 @@ got_allocated_blocks: ext4_update_inode_fsync_trans(handle, inode, 1); else ext4_update_inode_fsync_trans(handle, inode, 0); -out: - if (allocated > map->m_len) - allocated = map->m_len; + + map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); + map->m_pblk = pblk; + map->m_len = ar.len; + allocated = map->m_len; ext4_ext_show_leaf(inode, path); - map->m_flags |= EXT4_MAP_MAPPED; - map->m_pblk = newblock; - map->m_len = allocated; -out2: + +out: ext4_ext_drop_refs(path); kfree(path); @@ -4353,7 +4373,14 @@ retry: } if (err) return err; - return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); +retry_remove_space: + err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); + if (err == -ENOMEM) { + cond_resched(); + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry_remove_space; + } + return err; } static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, @@ -4363,7 +4390,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, struct inode *inode = file_inode(file); handle_t *handle; int ret = 0; - int ret2 = 0; + int ret2 = 0, ret3 = 0; int retries = 0; int depth = 0; struct ext4_map_blocks map; @@ -4423,10 +4450,11 @@ retry: if (ext4_update_inode_size(inode, epos) & 0x1) inode->i_mtime = inode->i_ctime; } - ext4_mark_inode_dirty(handle, inode); + ret2 = ext4_mark_inode_dirty(handle, inode); ext4_update_inode_fsync_trans(handle, inode, 1); - ret2 = ext4_journal_stop(handle); - if (ret2) + ret3 = ext4_journal_stop(handle); + ret2 = ret3 ? 
ret3 : ret2; + if (unlikely(ret2)) break; } if (ret == -ENOSPC && @@ -4490,7 +4518,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, inode_lock(inode); /* - * Indirect files do not support unwritten extnets + * Indirect files do not support unwritten extents */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { ret = -EOPNOTSUPP; @@ -4507,8 +4535,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, } flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; - if (mode & FALLOC_FL_KEEP_SIZE) - flags |= EXT4_GET_BLOCKS_KEEP_SIZE; /* Wait all existing dio workers, newcomers will block on i_mutex */ inode_dio_wait(inode); @@ -4577,7 +4603,9 @@ static long ext4_zero_range(struct file *file, loff_t offset, inode->i_mtime = inode->i_ctime = current_time(inode); if (new_size) ext4_update_inode_size(inode, new_size); - ext4_mark_inode_dirty(handle, inode); + ret = ext4_mark_inode_dirty(handle, inode); + if (unlikely(ret)) + goto out_handle; /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); @@ -4587,6 +4615,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, if (file->f_flags & O_SYNC) ext4_handle_sync(handle); +out_handle: ext4_journal_stop(handle); out_mutex: inode_unlock(inode); @@ -4647,8 +4676,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; - if (mode & FALLOC_FL_KEEP_SIZE) - flags |= EXT4_GET_BLOCKS_KEEP_SIZE; inode_lock(inode); @@ -4700,8 +4727,7 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, loff_t offset, ssize_t len) { unsigned int max_blocks; - int ret = 0; - int ret2 = 0; + int ret = 0, ret2 = 0, ret3 = 0; struct ext4_map_blocks map; unsigned int blkbits = inode->i_blkbits; unsigned int credits = 0; @@ -4734,9 +4760,13 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, "ext4_ext_map_blocks returned %d", inode->i_ino, map.m_lblk, map.m_len, ret); - ext4_mark_inode_dirty(handle, inode); - if (credits) - ret2 = ext4_journal_stop(handle); + ret2 = ext4_mark_inode_dirty(handle, inode); + if (credits) { + ret3 = ext4_journal_stop(handle); + if (unlikely(ret3)) + ret2 = ret3; + } + if (ret <= 0 || ret2) break; } @@ -4854,11 +4884,9 @@ static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) return 0; } -static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - __u64 start, __u64 len, bool from_es_cache) +int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 len) { - ext4_lblk_t start_blk; - u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR; int error = 0; if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { @@ -4868,12 +4896,6 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; } - if (from_es_cache) - ext4_fiemap_flags &= FIEMAP_FLAG_XATTR; - - if (fiemap_check_flags(fieinfo, ext4_fiemap_flags)) - return -EBADR; - /* * For bitmap files the maximum size limit could be smaller than * s_maxbytes, so check len here manually instead of just relying on the @@ -4885,40 +4907,20 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; - error = iomap_fiemap(inode, fieinfo, start, len, - &ext4_iomap_xattr_ops); - } else if (!from_es_cache) { - error = 
iomap_fiemap(inode, fieinfo, start, len, - &ext4_iomap_report_ops); - } else { - ext4_lblk_t len_blks; - __u64 last_blk; - - start_blk = start >> inode->i_sb->s_blocksize_bits; - last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; - if (last_blk >= EXT_MAX_BLOCKS) - last_blk = EXT_MAX_BLOCKS-1; - len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; - - /* - * Walk the extent tree gathering extent information - * and pushing extents back to the user. - */ - error = ext4_fill_es_cache_info(inode, start_blk, len_blks, - fieinfo); + return iomap_fiemap(inode, fieinfo, start, len, + &ext4_iomap_xattr_ops); } - return error; -} -int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - __u64 start, __u64 len) -{ - return _ext4_fiemap(inode, fieinfo, start, len, false); + return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); } int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { + ext4_lblk_t start_blk, len_blks; + __u64 last_blk; + int error = 0; + if (ext4_has_inline_data(inode)) { int has_inline; @@ -4929,9 +4931,33 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, return 0; } - return _ext4_fiemap(inode, fieinfo, start, len, true); -} + if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { + error = ext4_ext_precache(inode); + if (error) + return error; + fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; + } + + error = fiemap_prep(inode, fieinfo, start, &len, 0); + if (error) + return error; + + error = ext4_fiemap_check_ranges(inode, start, &len); + if (error) + return error; + start_blk = start >> inode->i_sb->s_blocksize_bits; + last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; + if (last_blk >= EXT_MAX_BLOCKS) + last_blk = EXT_MAX_BLOCKS-1; + len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; + + /* + * Walk the extent tree gathering extent information + * and pushing extents back to the user. + */ + return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); +} /* * ext4_access_path: @@ -5304,7 +5330,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) if (IS_SYNC(inode)) ext4_handle_sync(handle); inode->i_mtime = inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + ret = ext4_mark_inode_dirty(handle, inode); ext4_update_inode_fsync_trans(handle, inode, 1); out_stop: diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index d996b44d2265..e75171535375 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -1054,7 +1054,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len, end = (end > ext4_es_end(es)) ? 
ext4_es_end(es) : end; /* record the first block of the first delonly extent seen */ - if (rc->first_do_lblk_found == false) { + if (!rc->first_do_lblk_found) { rc->first_do_lblk = i; rc->first_do_lblk_found = true; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 0d624250a62b..2a01e31a032c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -287,6 +287,7 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, bool truncate = false; u8 blkbits = inode->i_blkbits; ext4_lblk_t written_blk, end_blk; + int ret; /* * Note that EXT4_I(inode)->i_disksize can get extended up to @@ -327,8 +328,14 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, goto truncate; } - if (ext4_update_inode_size(inode, offset + written)) - ext4_mark_inode_dirty(handle, inode); + if (ext4_update_inode_size(inode, offset + written)) { + ret = ext4_mark_inode_dirty(handle, inode); + if (unlikely(ret)) { + written = ret; + ext4_journal_stop(handle); + goto truncate; + } + } /* * We may need to truncate allocated but not written blocks beyond EOF. @@ -495,6 +502,12 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) if (ret <= 0) return ret; + /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */ + if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) { + ret = -EAGAIN; + goto out; + } + offset = iocb->ki_pos; count = ret; diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index e10206e7f4bb..1d668c8f131f 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -44,30 +44,28 @@ */ static int ext4_sync_parent(struct inode *inode) { - struct dentry *dentry = NULL; - struct inode *next; + struct dentry *dentry, *next; int ret = 0; if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) return 0; - inode = igrab(inode); + dentry = d_find_any_alias(inode); + if (!dentry) + return 0; while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); - dentry = d_find_any_alias(inode); - if (!dentry) - break; - next = igrab(d_inode(dentry->d_parent)); + + next = dget_parent(dentry); dput(dentry); - if (!next) - break; - iput(inode); - inode = next; + dentry = next; + inode = dentry->d_inode; + /* * The directory inode may have gone through rmdir by now. But * the inode itself and its blocks are still allocated (we hold - * a reference to the inode so it didn't go through - * ext4_evict_inode()) and so we are safe to flush metadata - * blocks and the inode. + * a reference to the inode via its dentry), so it didn't go + * through ext4_evict_inode()) and so we are safe to flush + * metadata blocks and the inode. 
*/ ret = sync_mapping_buffers(inode->i_mapping); if (ret) @@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode) if (ret) break; } - iput(inode); + dput(dentry); return ret; } @@ -176,7 +174,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ret = ext4_fsync_journal(inode, datasync, &needs_barrier); if (needs_barrier) { - err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); if (!ret) ret = err; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 4b8c9a9bdf0c..54d324e80fe5 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1246,6 +1246,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) ext4_error_err(sb, -err, "couldn't read orphan inode %lu (err %d)", ino, err); + brelse(bitmap_bh); return inode; } @@ -1440,7 +1441,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, if (ret < 0) goto err_out; if (barrier) - blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL); + blkdev_issue_flush(sb->s_bdev, GFP_NOFS); skip_zeroout: ext4_lock_group(sb, group); diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 107f0043f67f..be2b66eb65f7 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -467,7 +467,9 @@ static int ext4_splice_branch(handle_t *handle, /* * OK, we spliced it into the inode itself on a direct block. */ - ext4_mark_inode_dirty(handle, ar->inode); + err = ext4_mark_inode_dirty(handle, ar->inode); + if (unlikely(err)) + goto err_out; jbd_debug(5, "splicing direct\n"); } return err; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index f35e289e17aa..c3a1ad2db122 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1260,7 +1260,7 @@ out: int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode) { - int ret, inline_size, no_expand; + int ret, ret2, inline_size, no_expand; void *inline_start; struct ext4_iloc iloc; @@ -1314,7 +1314,9 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, out: ext4_write_unlock_xattr(dir, &no_expand); - ext4_mark_inode_dirty(handle, dir); + ret2 = ext4_mark_inode_dirty(handle, dir); + if (unlikely(ret2 && !ret)) + ret = ret2; brelse(iloc.bh); return ret; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2a4aae6acdcb..40ec5c7ef0d3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -221,6 +221,16 @@ void ext4_evict_inode(struct inode *inode) truncate_inode_pages_final(&inode->i_data); /* + * For inodes with journalled data, transaction commit could have + * dirtied the inode. Flush worker is ignoring it because of I_FREEING + * flag but we still need to remove the inode from the writeback lists. 
+ */ + if (!list_empty_careful(&inode->i_io_list)) { + WARN_ON_ONCE(!ext4_should_journal_data(inode)); + inode_io_list_del(inode); + } + + /* * Protect us against freezing - iput() caller didn't have to have any * protection against it */ @@ -432,11 +442,9 @@ static void ext4_map_blocks_es_recheck(handle_t *handle, */ down_read(&EXT4_I(inode)->i_data_sem); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { - retval = ext4_ext_map_blocks(handle, inode, map, flags & - EXT4_GET_BLOCKS_KEEP_SIZE); + retval = ext4_ext_map_blocks(handle, inode, map, 0); } else { - retval = ext4_ind_map_blocks(handle, inode, map, flags & - EXT4_GET_BLOCKS_KEEP_SIZE); + retval = ext4_ind_map_blocks(handle, inode, map, 0); } up_read((&EXT4_I(inode)->i_data_sem)); @@ -493,9 +501,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, #endif map->m_flags = 0; - ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," - "logical block %lu\n", inode->i_ino, flags, map->m_len, - (unsigned long) map->m_lblk); + ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n", + flags, map->m_len, (unsigned long) map->m_lblk); /* * ext4_map_blocks returns an int, and m_len is an unsigned int @@ -541,11 +548,9 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, */ down_read(&EXT4_I(inode)->i_data_sem); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { - retval = ext4_ext_map_blocks(handle, inode, map, flags & - EXT4_GET_BLOCKS_KEEP_SIZE); + retval = ext4_ext_map_blocks(handle, inode, map, 0); } else { - retval = ext4_ind_map_blocks(handle, inode, map, flags & - EXT4_GET_BLOCKS_KEEP_SIZE); + retval = ext4_ind_map_blocks(handle, inode, map, 0); } if (retval > 0) { unsigned int status; @@ -726,6 +731,9 @@ out_sem: return ret; } } + + if (retval < 0) + ext_debug(inode, "failed with err %d\n", retval); return retval; } @@ -1296,7 +1304,7 @@ static int ext4_write_end(struct file *file, * filesystems. */ if (i_size_changed || inline_data) - ext4_mark_inode_dirty(handle, inode); + ret = ext4_mark_inode_dirty(handle, inode); if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied @@ -1526,6 +1534,7 @@ struct mpage_da_data { struct ext4_map_blocks map; struct ext4_io_submit io_submit; /* IO submission data */ unsigned int do_map:1; + unsigned int scanned_until_end:1; }; static void mpage_release_unused_pages(struct mpage_da_data *mpd, @@ -1541,6 +1550,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, if (mpd->first_page >= mpd->next_page) return; + mpd->scanned_until_end = 0; index = mpd->first_page; end = mpd->next_page - 1; if (invalidate) { @@ -1681,8 +1691,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, invalid_block = ~0; map->m_flags = 0; - ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," - "logical block %lu\n", inode->i_ino, map->m_len, + ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len, (unsigned long) map->m_lblk); /* Lookup extent status tree firstly */ @@ -2078,7 +2087,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) return err; } -#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) +#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay)) /* * mballoc gives us at most this number of blocks... 
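The BH_FLAGS hunk above swaps the open-coded shifts for the kernel's BIT() helper, which is defined as an unsigned-long shift, so the resulting mask bits are unchanged and only the expression's type widens to unsigned long. A minimal stand-alone sketch of that equivalence follows; the BIT() definition mirrors the kernel's, while the DEMO_* bit numbers are placeholders rather than the real buffer_head enum values:

#include <assert.h>

/* Same shape as the kernel's BIT(nr): an unsigned-long shift. */
#define BIT(nr) (1UL << (nr))

/* Placeholder bit numbers for illustration only, not the real BH_* values. */
enum { DEMO_BH_UNWRITTEN = 3, DEMO_BH_DELAY = 5 };

int main(void)
{
	unsigned long new_mask = BIT(DEMO_BH_UNWRITTEN) | BIT(DEMO_BH_DELAY);
	unsigned long old_mask = (1 << DEMO_BH_UNWRITTEN) | (1 << DEMO_BH_DELAY);

	/* Identical mask value; only the C type of the expression differs. */
	assert(new_mask == old_mask);
	return 0;
}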
@@ -2188,7 +2197,11 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd, if (err < 0) return err; } - return lblk < blocks; + if (lblk >= blocks) { + mpd->scanned_until_end = 1; + return 0; + } + return 1; } /* @@ -2311,7 +2324,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) * mapping, or maybe the page was submitted for IO. * So we return to call further extent mapping. */ - if (err < 0 || map_bh == true) + if (err < 0 || map_bh) goto out; /* Page fully mapped - let IO run! */ err = mpage_submit_page(mpd, page); @@ -2358,7 +2371,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) dioread_nolock = ext4_should_dioread_nolock(inode); if (dioread_nolock) get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; - if (map->m_flags & (1 << BH_Delay)) + if (map->m_flags & BIT(BH_Delay)) get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; err = ext4_map_blocks(handle, inode, map, get_blocks_flags); @@ -2546,7 +2559,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, tag); if (nr_pages == 0) - goto out; + break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; @@ -2601,6 +2614,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) pagevec_release(&pvec); cond_resched(); } + mpd->scanned_until_end = 1; return 0; out: pagevec_release(&pvec); @@ -2619,7 +2633,6 @@ static int ext4_writepages(struct address_space *mapping, struct inode *inode = mapping->host; int needed_blocks, rsv_blocks = 0, ret = 0; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); - bool done; struct blk_plug plug; bool give_up_on_write = false; @@ -2705,7 +2718,6 @@ static int ext4_writepages(struct address_space *mapping, retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); - done = false; blk_start_plug(&plug); /* @@ -2715,6 +2727,7 @@ retry: * started. */ mpd.do_map = 0; + mpd.scanned_until_end = 0; mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); if (!mpd.io_submit.io_end) { ret = -ENOMEM; @@ -2730,7 +2743,7 @@ retry: if (ret < 0) goto unplug; - while (!done && mpd.first_page <= mpd.last_page) { + while (!mpd.scanned_until_end && wbc->nr_to_write > 0) { /* For each extent of pages we use new io_end */ mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); if (!mpd.io_submit.io_end) { @@ -2765,20 +2778,9 @@ retry: trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); ret = mpage_prepare_extent_to_map(&mpd); - if (!ret) { - if (mpd.map.m_len) - ret = mpage_map_and_submit_extent(handle, &mpd, + if (!ret && mpd.map.m_len) + ret = mpage_map_and_submit_extent(handle, &mpd, &give_up_on_write); - else { - /* - * We scanned the whole range (or exhausted - * nr_to_write), submitted what was mapped and - * didn't find anything needing mapping. We are - * done. - */ - done = true; - } - } /* * Caution: If the handle is synchronous, * ext4_journal_stop() can wait for transaction commit @@ -3077,7 +3079,7 @@ static int ext4_da_write_end(struct file *file, * new_i_size is less that inode->i_size * bu greater than i_disksize.(hint delalloc) */ - ext4_mark_inode_dirty(handle, inode); + ret = ext4_mark_inode_dirty(handle, inode); } } @@ -3094,7 +3096,7 @@ static int ext4_da_write_end(struct file *file, if (ret2 < 0) ret = ret2; ret2 = ext4_journal_stop(handle); - if (!ret) + if (unlikely(ret2 && !ret)) ret = ret2; return ret ? 
ret : copied; @@ -3224,23 +3226,20 @@ static int ext4_readpage(struct file *file, struct page *page) ret = ext4_readpage_inline(inode, page); if (ret == -EAGAIN) - return ext4_mpage_readpages(page->mapping, NULL, page, 1, - false); + return ext4_mpage_readpages(inode, NULL, page); return ret; } -static int -ext4_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void ext4_readahead(struct readahead_control *rac) { - struct inode *inode = mapping->host; + struct inode *inode = rac->mapping->host; - /* If the file has inline data, no need to do readpages. */ + /* If the file has inline data, no need to do readahead. */ if (ext4_has_inline_data(inode)) - return 0; + return; - return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true); + ext4_mpage_readpages(inode, rac, NULL); } static void ext4_invalidatepage(struct page *page, unsigned int offset, @@ -3605,7 +3604,7 @@ static int ext4_set_page_dirty(struct page *page) static const struct address_space_operations ext4_aops = { .readpage = ext4_readpage, - .readpages = ext4_readpages, + .readahead = ext4_readahead, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_write_begin, @@ -3622,7 +3621,7 @@ static const struct address_space_operations ext4_aops = { static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, - .readpages = ext4_readpages, + .readahead = ext4_readahead, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_write_begin, @@ -3638,7 +3637,7 @@ static const struct address_space_operations ext4_journalled_aops = { static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, - .readpages = ext4_readpages, + .readahead = ext4_readahead, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_da_write_begin, @@ -3886,6 +3885,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, loff_t len) { handle_t *handle; + int ret; + loff_t size = i_size_read(inode); WARN_ON(!inode_is_locked(inode)); @@ -3899,10 +3900,10 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, if (IS_ERR(handle)) return PTR_ERR(handle); ext4_update_i_disksize(inode, size); - ext4_mark_inode_dirty(handle, inode); + ret = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); - return 0; + return ret; } static void ext4_wait_dax_page(struct ext4_inode_info *ei) @@ -3954,7 +3955,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) loff_t first_block_offset, last_block_offset; handle_t *handle; unsigned int credits; - int ret = 0; + int ret = 0, ret2 = 0; trace_ext4_punch_hole(inode, offset, length, 0); @@ -4077,7 +4078,9 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) ext4_handle_sync(handle); inode->i_mtime = inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + ret2 = ext4_mark_inode_dirty(handle, inode); + if (unlikely(ret2)) + ret = ret2; if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); out_stop: @@ -4146,7 +4149,7 @@ int ext4_truncate(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); unsigned int credits; - int err = 0; + int err = 0, err2; handle_t *handle; struct address_space *mapping = inode->i_mapping; @@ -4234,7 +4237,9 @@ out_stop: ext4_orphan_del(handle, inode); inode->i_mtime = inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + err2 = 
ext4_mark_inode_dirty(handle, inode); + if (unlikely(err2 && !err)) + err = err2; ext4_journal_stop(handle); trace_ext4_truncate_exit(inode); @@ -4860,21 +4865,22 @@ static int ext4_inode_blocks_set(handle_t *handle, return 0; } -struct other_inode { - unsigned long orig_ino; - struct ext4_inode *raw_inode; -}; - -static int other_inode_match(struct inode * inode, unsigned long ino, - void *data) +static void __ext4_update_other_inode_time(struct super_block *sb, + unsigned long orig_ino, + unsigned long ino, + struct ext4_inode *raw_inode) { - struct other_inode *oi = (struct other_inode *) data; + struct inode *inode; - if ((inode->i_ino != ino) || - (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | + inode = find_inode_by_ino_rcu(sb, ino); + if (!inode) + return; + + if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_INODE)) || ((inode->i_state & I_DIRTY_TIME) == 0)) - return 0; + return; + spin_lock(&inode->i_lock); if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_INODE)) == 0) && @@ -4885,16 +4891,15 @@ static int other_inode_match(struct inode * inode, unsigned long ino, spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); - EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); - EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); - EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); - ext4_inode_csum_set(inode, oi->raw_inode, ei); + EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); + EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); + EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); + ext4_inode_csum_set(inode, raw_inode, ei); spin_unlock(&ei->i_raw_lock); - trace_ext4_other_inode_update_time(inode, oi->orig_ino); - return -1; + trace_ext4_other_inode_update_time(inode, orig_ino); + return; } spin_unlock(&inode->i_lock); - return -1; } /* @@ -4904,24 +4909,24 @@ static int other_inode_match(struct inode * inode, unsigned long ino, static void ext4_update_other_inodes_time(struct super_block *sb, unsigned long orig_ino, char *buf) { - struct other_inode oi; unsigned long ino; int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; int inode_size = EXT4_INODE_SIZE(sb); - oi.orig_ino = orig_ino; /* * Calculate the first inode in the inode table block. Inode * numbers are one-based. That is, the first inode in a block * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). */ ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; + rcu_read_lock(); for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { if (ino == orig_ino) continue; - oi.raw_inode = (struct ext4_inode *) buf; - (void) find_inode_nowait(sb, ino, other_inode_match, &oi); + __ext4_update_other_inode_time(sb, orig_ino, ino, + (struct ext4_inode *)buf); } + rcu_read_unlock(); } /* @@ -5292,6 +5297,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) inode->i_gid = attr->ia_gid; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); + if (unlikely(error)) + return error; } if (attr->ia_valid & ATTR_SIZE) { @@ -5777,7 +5784,8 @@ out_unlock: * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. 
*/ -int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) +int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode, + const char *func, unsigned int line) { struct ext4_iloc iloc; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -5787,13 +5795,18 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) trace_ext4_mark_inode_dirty(inode, _RET_IP_); err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) - return err; + goto out; if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize) ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize, iloc, handle); - return ext4_mark_iloc_dirty(handle, inode, &iloc); + err = ext4_mark_iloc_dirty(handle, inode, &iloc); +out: + if (unlikely(err)) + ext4_error_inode_err(inode, func, line, 0, err, + "mark_inode_dirty error"); + return err; } /* diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 0746532ba463..2162db0c747d 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -754,14 +754,6 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg) fieinfo.fi_extents_max = fiemap.fm_extent_count; fieinfo.fi_extents_start = ufiemap->fm_extents; - if (fiemap.fm_extent_count != 0 && - !access_ok(fieinfo.fi_extents_start, - fieinfo.fi_extents_max * sizeof(struct fiemap_extent))) - return -EFAULT; - - if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC) - filemap_write_and_wait(inode->i_mapping); - error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start, fiemap.fm_length); fiemap.fm_flags = fieinfo.fi_flags; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 30d5d97548c4..a9083113a8c0 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -18,13 +18,6 @@ #include <linux/backing-dev.h> #include <trace/events/ext4.h> -#ifdef CONFIG_EXT4_DEBUG -ushort ext4_mballoc_debug __read_mostly; - -module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644); -MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); -#endif - /* * MUSTDO: * - test ext4_ext_search_left() and ext4_ext_search_right() @@ -356,6 +349,36 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ext4_group_t group); +static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); + +/* + * The algorithm using this percpu seq counter goes below: + * 1. We sample the percpu discard_pa_seq counter before trying for block + * allocation in ext4_mb_new_blocks(). + * 2. We increment this percpu discard_pa_seq counter when we either allocate + * or free these blocks i.e. while marking those blocks as used/free in + * mb_mark_used()/mb_free_blocks(). + * 3. We also increment this percpu seq counter when we successfully identify + * that the bb_prealloc_list is not empty and hence proceed for discarding + * of those PAs inside ext4_mb_discard_group_preallocations(). + * + * Now to make sure that the regular fast path of block allocation is not + * affected, as a small optimization we only sample the percpu seq counter + * on that cpu. Only when the block allocation fails and when freed blocks + * found were 0, that is when we sample percpu seq counter for all cpus using + * below function ext4_get_discard_pa_seq_sum(). This happens after making + * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty. 
+ */ +static DEFINE_PER_CPU(u64, discard_pa_seq); +static inline u64 ext4_get_discard_pa_seq_sum(void) +{ + int __cpu; + u64 __seq = 0; + + for_each_possible_cpu(__cpu) + __seq += per_cpu(discard_pa_seq, __cpu); + return __seq; +} static inline void *mb_correct_addr_and_bit(int *bit, void *addr) { @@ -493,6 +516,8 @@ static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { + if (unlikely(e4b->bd_info->bb_bitmap == NULL)) + return; if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { unsigned char *b1, *b2; int i; @@ -511,6 +536,31 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) } } +static void mb_group_bb_bitmap_alloc(struct super_block *sb, + struct ext4_group_info *grp, ext4_group_t group) +{ + struct buffer_head *bh; + + grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); + if (!grp->bb_bitmap) + return; + + bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR_OR_NULL(bh)) { + kfree(grp->bb_bitmap); + grp->bb_bitmap = NULL; + return; + } + + memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); + put_bh(bh); +} + +static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) +{ + kfree(grp->bb_bitmap); +} + #else static inline void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) @@ -526,6 +576,17 @@ static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { return; } + +static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, + struct ext4_group_info *grp, ext4_group_t group) +{ + return; +} + +static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) +{ + return; +} #endif #ifdef AGGRESSIVE_CHECK @@ -820,14 +881,14 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) char *bitmap; struct ext4_group_info *grinfo; - mb_debug(1, "init page %lu\n", page->index); - inode = page->mapping->host; sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); blocksize = i_blocksize(inode); blocks_per_page = PAGE_SIZE / blocksize; + mb_debug(sb, "init page %lu\n", page->index); + groups_per_page = blocks_per_page >> 1; if (groups_per_page == 0) groups_per_page = 1; @@ -867,7 +928,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) bh[i] = NULL; goto out; } - mb_debug(1, "read bitmap for group %u\n", group); + mb_debug(sb, "read bitmap for group %u\n", group); } /* wait for I/O completion */ @@ -912,7 +973,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) if ((first_block + i) & 1) { /* this is block of buddy */ BUG_ON(incore == NULL); - mb_debug(1, "put buddy for group %u in page %lu/%x\n", + mb_debug(sb, "put buddy for group %u in page %lu/%x\n", group, page->index, i * blocksize); trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo = ext4_get_group_info(sb, group); @@ -932,7 +993,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) } else { /* this is block of bitmap */ BUG_ON(incore != NULL); - mb_debug(1, "put bitmap for group %u in page %lu/%x\n", + mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", group, page->index, i * blocksize); trace_ext4_mb_bitmap_load(sb, group); @@ -1038,7 +1099,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) int ret = 0; might_sleep(); - mb_debug(1, "init group %u\n", group); + mb_debug(sb, "init group %u\n", group); this_grp = ext4_get_group_info(sb, group); /* * This ensures that we don't reinit the buddy cache @@ 
-1110,7 +1171,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, struct inode *inode = sbi->s_buddy_cache; might_sleep(); - mb_debug(1, "load group %u\n", group); + mb_debug(sb, "load group %u\n", group); blocks_per_page = PAGE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); @@ -1430,6 +1491,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, mb_check_buddy(e4b); mb_free_blocks_double(inode, e4b, first, count); + this_cpu_inc(discard_pa_seq); e4b->bd_info->bb_free += count; if (first < e4b->bd_info->bb_first_free) e4b->bd_info->bb_first_free = first; @@ -1571,6 +1633,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) mb_check_buddy(e4b); mb_mark_used_double(e4b, start, len); + this_cpu_inc(discard_pa_seq); e4b->bd_info->bb_free -= len; if (e4b->bd_info->bb_first_free == start) e4b->bd_info->bb_first_free += len; @@ -1670,6 +1733,14 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, sbi->s_mb_last_start = ac->ac_f_ex.fe_start; spin_unlock(&sbi->s_md_lock); } + /* + * As we've just preallocated more space than + * user requested originally, we store allocated + * space in a special descriptor. + */ + if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) + ext4_mb_new_preallocation(ac); + } /* @@ -1918,7 +1989,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, ext4_mb_use_best_found(ac, e4b); - BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); + BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); if (EXT4_SB(sb)->s_mb_stats) atomic_inc(&EXT4_SB(sb)->s_bal_2orders); @@ -2035,15 +2106,14 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, } /* - * This is now called BEFORE we load the buddy bitmap. + * This is also called BEFORE we load the buddy bitmap. * Returns either 1 or 0 indicating that the group is either suitable - * for the allocation or not. In addition it can also return negative - * error code when something goes wrong. + * for the allocation or not. 
*/ -static int ext4_mb_good_group(struct ext4_allocation_context *ac, +static bool ext4_mb_good_group(struct ext4_allocation_context *ac, ext4_group_t group, int cr) { - unsigned free, fragments; + ext4_grpblk_t free, fragments; int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); @@ -2051,23 +2121,16 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, free = grp->bb_free; if (free == 0) - return 0; + return false; if (cr <= 2 && free < ac->ac_g_ex.fe_len) - return 0; + return false; if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) - return 0; - - /* We only do this if the grp has never been initialized */ - if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { - int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); - if (ret) - return ret; - } + return false; fragments = grp->bb_fragments; if (fragments == 0) - return 0; + return false; switch (cr) { case 0: @@ -2077,38 +2140,80 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, if ((ac->ac_flags & EXT4_MB_HINT_DATA) && (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && ((group % flex_size) == 0)) - return 0; + return false; if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || (free / fragments) >= ac->ac_g_ex.fe_len) - return 1; + return true; if (grp->bb_largest_free_order < ac->ac_2order) - return 0; + return false; - return 1; + return true; case 1: if ((free / fragments) >= ac->ac_g_ex.fe_len) - return 1; + return true; break; case 2: if (free >= ac->ac_g_ex.fe_len) - return 1; + return true; break; case 3: - return 1; + return true; default: BUG(); } - return 0; + return false; +} + +/* + * This could return negative error code if something goes wrong + * during ext4_mb_init_group(). This should not be called with + * ext4_lock_group() held. 
+ */ +static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, + ext4_group_t group, int cr) +{ + struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); + struct super_block *sb = ac->ac_sb; + bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; + ext4_grpblk_t free; + int ret = 0; + + if (should_lock) + ext4_lock_group(sb, group); + free = grp->bb_free; + if (free == 0) + goto out; + if (cr <= 2 && free < ac->ac_g_ex.fe_len) + goto out; + if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) + goto out; + if (should_lock) + ext4_unlock_group(sb, group); + + /* We only do this if the grp has never been initialized */ + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { + ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); + if (ret) + return ret; + } + + if (should_lock) + ext4_lock_group(sb, group); + ret = ext4_mb_good_group(ac, group, cr); +out: + if (should_lock) + ext4_unlock_group(sb, group); + return ret; } static noinline_for_stack int ext4_mb_regular_allocator(struct ext4_allocation_context *ac) { ext4_group_t ngroups, group, i; - int cr; + int cr = -1; int err = 0, first_err = 0; struct ext4_sb_info *sbi; struct super_block *sb; @@ -2189,7 +2294,7 @@ repeat: group = 0; /* This now checks without needing the buddy page */ - ret = ext4_mb_good_group(ac, group, cr); + ret = ext4_mb_good_group_nolock(ac, group, cr); if (ret <= 0) { if (!first_err) first_err = ret; @@ -2207,11 +2312,9 @@ repeat: * block group */ ret = ext4_mb_good_group(ac, group, cr); - if (ret <= 0) { + if (ret == 0) { ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); - if (!first_err) - first_err = ret; continue; } @@ -2260,6 +2363,10 @@ repeat: out: if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) err = first_err; + + mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", + ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, + ac->ac_flags, cr, err); return err; } @@ -2452,20 +2559,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, meta_group_info[i]->bb_free_root = RB_ROOT; meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ -#ifdef DOUBLE_CHECK - { - struct buffer_head *bh; - meta_group_info[i]->bb_bitmap = - kmalloc(sb->s_blocksize, GFP_NOFS); - BUG_ON(meta_group_info[i]->bb_bitmap == NULL); - bh = ext4_read_block_bitmap(sb, group); - BUG_ON(IS_ERR_OR_NULL(bh)); - memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, - sb->s_blocksize); - put_bh(bh); - } -#endif - + mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); return 0; exit_group_info: @@ -2702,7 +2796,7 @@ out: } /* need to called with the ext4 group lock held */ -static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) +static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) { struct ext4_prealloc_space *pa; struct list_head *cur, *tmp; @@ -2714,9 +2808,7 @@ static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) count++; kmem_cache_free(ext4_pspace_cachep, pa); } - if (count) - mb_debug(1, "mballoc: %u PAs left\n", count); - + return count; } int ext4_mb_release(struct super_block *sb) @@ -2727,16 +2819,18 @@ int ext4_mb_release(struct super_block *sb) struct ext4_group_info *grinfo, ***group_info; struct ext4_sb_info *sbi = EXT4_SB(sb); struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); + int count; if (sbi->s_group_info) { for (i = 0; i < ngroups; i++) { cond_resched(); grinfo = ext4_get_group_info(sb, i); -#ifdef DOUBLE_CHECK - kfree(grinfo->bb_bitmap); -#endif + mb_group_bb_bitmap_free(grinfo); 
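The ext4_mb_good_group_nolock() helper added above does a cheap pre-check on bb_free and the corruption flag (taking the group lock only when EXT4_MB_STRICT_CHECK is set in the allocation context), initializes the group's buddy data outside the lock if needed, and only then re-checks suitability under ext4_lock_group(). As a rough illustration of that check-cheaply-then-recheck-under-the-lock shape, here is a minimal userspace sketch; the types, field names and threshold are invented and are not kernel code:

/* Simplified model of "optimistic pre-check, expensive init, re-check
 * under the lock".  Not kernel code; struct and fields are made up. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_group {
	pthread_mutex_t lock;
	int free_blocks;
	bool initialized;
};

static bool group_is_good(const struct toy_group *g, int want)
{
	return g->free_blocks >= want;		/* cheap predicate */
}

static bool group_good_nolock(struct toy_group *g, int want, bool strict)
{
	bool ok;

	if (strict)				/* strict mode takes the lock even for the pre-check */
		pthread_mutex_lock(&g->lock);
	ok = group_is_good(g, want);
	if (strict)
		pthread_mutex_unlock(&g->lock);
	if (!ok)
		return false;

	if (!g->initialized)			/* expensive setup happens outside the lock */
		g->initialized = true;

	pthread_mutex_lock(&g->lock);
	ok = group_is_good(g, want);		/* state may have changed meanwhile */
	pthread_mutex_unlock(&g->lock);
	return ok;
}

int main(void)
{
	struct toy_group g = { PTHREAD_MUTEX_INITIALIZER, 128, false };

	printf("group usable: %d\n", group_good_nolock(&g, 32, false));
	return 0;
}

In the real allocator the second check is the existing ext4_mb_good_group(), which is why that function can drop its negative-error return and become bool while the nolock wrapper keeps the error path from ext4_mb_init_group().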
ext4_lock_group(sb, i); - ext4_mb_cleanup_pa(grinfo); + count = ext4_mb_cleanup_pa(grinfo); + if (count) + mb_debug(sb, "mballoc: %d PAs left\n", + count); ext4_unlock_group(sb, i); kmem_cache_free(cachep, grinfo); } @@ -2809,7 +2903,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb, struct ext4_group_info *db; int err, count = 0, count2 = 0; - mb_debug(1, "gonna free %u blocks in group %u (0x%p):", + mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", entry->efd_count, entry->efd_group, entry); err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); @@ -2849,7 +2943,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb, kmem_cache_free(ext4_free_data_cachep, entry); ext4_mb_unload_buddy(&e4b); - mb_debug(1, "freed %u blocks in %u structures\n", count, count2); + mb_debug(sb, "freed %d blocks in %d structures\n", count, + count2); } /* @@ -2909,23 +3004,26 @@ int __init ext4_init_mballoc(void) ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, SLAB_RECLAIM_ACCOUNT); if (ext4_pspace_cachep == NULL) - return -ENOMEM; + goto out; ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, SLAB_RECLAIM_ACCOUNT); - if (ext4_ac_cachep == NULL) { - kmem_cache_destroy(ext4_pspace_cachep); - return -ENOMEM; - } + if (ext4_ac_cachep == NULL) + goto out_pa_free; ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT); - if (ext4_free_data_cachep == NULL) { - kmem_cache_destroy(ext4_pspace_cachep); - kmem_cache_destroy(ext4_ac_cachep); - return -ENOMEM; - } + if (ext4_free_data_cachep == NULL) + goto out_ac_free; + return 0; + +out_ac_free: + kmem_cache_destroy(ext4_ac_cachep); +out_pa_free: + kmem_cache_destroy(ext4_pspace_cachep); +out: + return -ENOMEM; } void ext4_exit_mballoc(void) @@ -3077,8 +3175,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) BUG_ON(lg == NULL); ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; - mb_debug(1, "#%u: goal %u blocks for locality group\n", - current->pid, ac->ac_g_ex.fe_len); + mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); } /* @@ -3276,8 +3373,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; } - mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, - (unsigned) orig_size, (unsigned) start); + mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, + orig_size, start); } static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) @@ -3366,7 +3463,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, BUG_ON(pa->pa_free < len); pa->pa_free -= len; - mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); + mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); } /* @@ -3390,7 +3487,8 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, * in on-disk bitmap -- see ext4_mb_release_context() * Other CPUs are prevented from allocating from this pa by lg_mutex */ - mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); + mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", + pa->pa_lstart-len, len, pa); } /* @@ -3425,7 +3523,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block, /* * search goal blocks in preallocated space */ -static noinline_for_stack int +static noinline_for_stack bool ext4_mb_use_preallocated(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); @@ -3437,7 +3535,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) /* only data can be 
preallocated */ if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) - return 0; + return false; /* first, try per-file preallocation */ rcu_read_lock(); @@ -3464,7 +3562,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) spin_unlock(&pa->pa_lock); ac->ac_criteria = 10; rcu_read_unlock(); - return 1; + return true; } spin_unlock(&pa->pa_lock); } @@ -3472,12 +3570,12 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) /* can we use group allocation? */ if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) - return 0; + return false; /* inode may have no locality group for some reason */ lg = ac->ac_lg; if (lg == NULL) - return 0; + return false; order = fls(ac->ac_o_ex.fe_len) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ @@ -3506,9 +3604,9 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) if (cpa) { ext4_mb_use_group_pa(ac, cpa); ac->ac_criteria = 20; - return 1; + return true; } - return 0; + return false; } /* @@ -3573,7 +3671,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_set_bits(bitmap, start, len); preallocated += len; } - mb_debug(1, "preallocated %u for group %u\n", preallocated, group); + mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); } static void ext4_mb_pa_callback(struct rcu_head *head) @@ -3649,7 +3747,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, /* * creates new preallocated space for given inode */ -static noinline_for_stack int +static noinline_for_stack void ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; @@ -3662,10 +3760,9 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); + BUG_ON(ac->ac_pa == NULL); - pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); - if (pa == NULL) - return -ENOMEM; + pa = ac->ac_pa; if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { int winl; @@ -3709,15 +3806,14 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; - atomic_set(&pa->pa_count, 1); spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_inode_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_INODE_PA; - mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, - pa->pa_pstart, pa->pa_len, pa->pa_lstart); + mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, + pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_inode_pa(ac, pa); ext4_mb_use_inode_pa(ac, pa); @@ -3729,21 +3825,17 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) pa->pa_obj_lock = &ei->i_prealloc_lock; pa->pa_inode = ac->ac_inode; - ext4_lock_group(sb, ac->ac_b_ex.fe_group); list_add(&pa->pa_group_list, &grp->bb_prealloc_list); - ext4_unlock_group(sb, ac->ac_b_ex.fe_group); spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); - - return 0; } /* * creates new preallocated space for locality group inodes belongs to */ -static noinline_for_stack int +static noinline_for_stack void ext4_mb_new_group_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; @@ -3755,11 +3847,9 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); 
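ext4_mb_new_inode_pa() now only fills in a descriptor that was allocated up front and stashed in ac->ac_pa: the descriptor records where the over-allocation sits physically, which logical range it covers, and how much of it is still free, so later writes into that range can be satisfied from it without another scan. A toy, userspace-only model of consuming such a descriptor, with made-up field names, might look like:

/* Toy inode-preallocation descriptor: a surplus region recorded once
 * and carved up by later requests.  Illustrative only, not ext4's. */
#include <stdio.h>

struct toy_pa {
	unsigned long long pstart;	/* physical start of the preallocation */
	unsigned long long lstart;	/* logical start it maps to */
	unsigned int len;		/* blocks preallocated */
	unsigned int free;		/* blocks not yet handed out */
};

/* Satisfy 'want' blocks at logical block 'lblk'; 0 means "no fit". */
static unsigned long long pa_use(struct toy_pa *pa,
				 unsigned long long lblk, unsigned int want)
{
	unsigned long long off;

	if (lblk < pa->lstart || lblk + want > pa->lstart + pa->len)
		return 0;
	if (want > pa->free)
		return 0;
	off = lblk - pa->lstart;
	pa->free -= want;
	return pa->pstart + off;
}

int main(void)
{
	struct toy_pa pa = { 4096, 100, 64, 64 };
	unsigned long long phys = pa_use(&pa, 110, 8);

	printf("phys %llu\n", phys);
	printf("free left %u\n", pa.free);
	return 0;
}

In the patch the descriptor itself comes from the new ext4_mb_pa_alloc() before the scan, so nothing can fail at this point and both ext4_mb_new_inode_pa() and ext4_mb_new_group_pa() are able to return void.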
BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); + BUG_ON(ac->ac_pa == NULL); - BUG_ON(ext4_pspace_cachep == NULL); - pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); - if (pa == NULL) - return -ENOMEM; + pa = ac->ac_pa; /* preallocation can change ac_b_ex, thus we store actually * allocated blocks for history */ @@ -3769,15 +3859,14 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) pa->pa_lstart = pa->pa_pstart; pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; - atomic_set(&pa->pa_count, 1); spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_inode_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_GROUP_PA; - mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, - pa->pa_pstart, pa->pa_len, pa->pa_lstart); + mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, + pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_group_pa(ac, pa); ext4_mb_use_group_pa(ac, pa); @@ -3790,26 +3879,20 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) pa->pa_obj_lock = &lg->lg_prealloc_lock; pa->pa_inode = NULL; - ext4_lock_group(sb, ac->ac_b_ex.fe_group); list_add(&pa->pa_group_list, &grp->bb_prealloc_list); - ext4_unlock_group(sb, ac->ac_b_ex.fe_group); /* * We will later add the new pa to the right bucket * after updating the pa_free in ext4_mb_release_context */ - return 0; } -static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) +static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) { - int err; - if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) - err = ext4_mb_new_group_pa(ac); + ext4_mb_new_group_pa(ac); else - err = ext4_mb_new_inode_pa(ac); - return err; + ext4_mb_new_inode_pa(ac); } /* @@ -3844,7 +3927,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, if (bit >= end) break; next = mb_find_next_bit(bitmap_bh->b_data, end, bit); - mb_debug(1, " free preallocated %u/%u in group %u\n", + mb_debug(sb, "free preallocated %u/%u in group %u\n", (unsigned) ext4_group_first_block_no(sb, group) + bit, (unsigned) next - bit, (unsigned) group); free += next - bit; @@ -3858,10 +3941,10 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, } if (free != pa->pa_free) { ext4_msg(e4b->bd_sb, KERN_CRIT, - "pa %p: logic %lu, phys. %lu, len %lu", + "pa %p: logic %lu, phys. 
%lu, len %d", pa, (unsigned long) pa->pa_lstart, (unsigned long) pa->pa_pstart, - (unsigned long) pa->pa_len); + pa->pa_len); ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", free, pa->pa_free); /* @@ -3915,10 +3998,9 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, int busy = 0; int free = 0; - mb_debug(1, "discard preallocation for group %u\n", group); - + mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) - return 0; + goto out_dbg; bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(bitmap_bh)) { @@ -3926,7 +4008,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", err, group); - return 0; + goto out_dbg; } err = ext4_mb_load_buddy(sb, group, &e4b); @@ -3934,7 +4016,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, ext4_warning(sb, "Error %d loading buddy information for %u", err, group); put_bh(bitmap_bh); - return 0; + goto out_dbg; } if (needed == 0) @@ -3943,6 +4025,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, INIT_LIST_HEAD(&list); repeat: ext4_lock_group(sb, group); + this_cpu_inc(discard_pa_seq); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); @@ -3979,6 +4062,8 @@ repeat: /* found anything to free? */ if (list_empty(&list)) { BUG_ON(free != 0); + mb_debug(sb, "Someone else may have freed PA for this group %u\n", + group); goto out; } @@ -4003,6 +4088,9 @@ out: ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); +out_dbg: + mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", + free, group, grp->bb_free); return free; } @@ -4031,7 +4119,8 @@ void ext4_discard_preallocations(struct inode *inode) return; } - mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); + mb_debug(sb, "discard preallocation for inode %lu\n", + inode->i_ino); trace_ext4_discard_preallocations(inode); INIT_LIST_HEAD(&list); @@ -4119,22 +4208,74 @@ repeat: } } +static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) +{ + struct ext4_prealloc_space *pa; + + BUG_ON(ext4_pspace_cachep == NULL); + pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); + if (!pa) + return -ENOMEM; + atomic_set(&pa->pa_count, 1); + ac->ac_pa = pa; + return 0; +} + +static void ext4_mb_pa_free(struct ext4_allocation_context *ac) +{ + struct ext4_prealloc_space *pa = ac->ac_pa; + + BUG_ON(!pa); + ac->ac_pa = NULL; + WARN_ON(!atomic_dec_and_test(&pa->pa_count)); + kmem_cache_free(ext4_pspace_cachep, pa); +} + #ifdef CONFIG_EXT4_DEBUG +static inline void ext4_mb_show_pa(struct super_block *sb) +{ + ext4_group_t i, ngroups; + + if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) + return; + + ngroups = ext4_get_groups_count(sb); + mb_debug(sb, "groups: "); + for (i = 0; i < ngroups; i++) { + struct ext4_group_info *grp = ext4_get_group_info(sb, i); + struct ext4_prealloc_space *pa; + ext4_grpblk_t start; + struct list_head *cur; + ext4_lock_group(sb, i); + list_for_each(cur, &grp->bb_prealloc_list) { + pa = list_entry(cur, struct ext4_prealloc_space, + pa_group_list); + spin_lock(&pa->pa_lock); + ext4_get_group_no_and_offset(sb, pa->pa_pstart, + NULL, &start); + spin_unlock(&pa->pa_lock); + mb_debug(sb, "PA:%u:%d:%d\n", i, start, + pa->pa_len); + } + ext4_unlock_group(sb, i); + mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, + grp->bb_fragments); + } +} + static void ext4_mb_show_ac(struct ext4_allocation_context *ac) { 
struct super_block *sb = ac->ac_sb; - ext4_group_t ngroups, i; - if (!ext4_mballoc_debug || - (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) + if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) return; - ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" + mb_debug(sb, "Can't allocate:" " Allocation context details:"); - ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", + mb_debug(sb, "status %u flags 0x%x", ac->ac_status, ac->ac_flags); - ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " - "goal %lu/%lu/%lu@%lu, " + mb_debug(sb, "orig %lu/%lu/%lu@%lu, " + "goal %lu/%lu/%lu@%lu, " "best %lu/%lu/%lu@%lu cr %d", (unsigned long)ac->ac_o_ex.fe_group, (unsigned long)ac->ac_o_ex.fe_start, @@ -4149,37 +4290,17 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac) (unsigned long)ac->ac_b_ex.fe_len, (unsigned long)ac->ac_b_ex.fe_logical, (int)ac->ac_criteria); - ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found); - ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); - ngroups = ext4_get_groups_count(sb); - for (i = 0; i < ngroups; i++) { - struct ext4_group_info *grp = ext4_get_group_info(sb, i); - struct ext4_prealloc_space *pa; - ext4_grpblk_t start; - struct list_head *cur; - ext4_lock_group(sb, i); - list_for_each(cur, &grp->bb_prealloc_list) { - pa = list_entry(cur, struct ext4_prealloc_space, - pa_group_list); - spin_lock(&pa->pa_lock); - ext4_get_group_no_and_offset(sb, pa->pa_pstart, - NULL, &start); - spin_unlock(&pa->pa_lock); - printk(KERN_ERR "PA:%u:%d:%u \n", i, - start, pa->pa_len); - } - ext4_unlock_group(sb, i); - - if (grp->bb_free == 0) - continue; - printk(KERN_ERR "%u: %d/%d \n", - i, grp->bb_free, grp->bb_fragments); - } - printk(KERN_ERR "\n"); + mb_debug(sb, "%u found", ac->ac_found); + ext4_mb_show_pa(sb); } #else +static inline void ext4_mb_show_pa(struct super_block *sb) +{ + return; +} static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) { + ext4_mb_show_pa(ac->ac_sb); return; } #endif @@ -4282,7 +4403,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, * locality group. this is a policy, actually */ ext4_mb_group_or_file(ac); - mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " + mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " "left: %u/%u, right %u/%u to %swritable\n", (unsigned) ar->len, (unsigned) ar->logical, (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, @@ -4303,7 +4424,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, struct list_head discard_list; struct ext4_prealloc_space *pa, *tmp; - mb_debug(1, "discard locality group preallocation\n"); + mb_debug(sb, "discard locality group preallocation\n"); INIT_LIST_HEAD(&discard_list); @@ -4486,6 +4607,30 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) return freed; } +static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, + struct ext4_allocation_context *ac, u64 *seq) +{ + int freed; + u64 seq_retry = 0; + bool ret = false; + + freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); + if (freed) { + ret = true; + goto out_dbg; + } + seq_retry = ext4_get_discard_pa_seq_sum(); + if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { + ac->ac_flags |= EXT4_MB_STRICT_CHECK; + *seq = seq_retry; + ret = true; + } + +out_dbg: + mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? 
"yes" : "no"); + return ret; +} + /* * Main entry point into mballoc to allocate blocks * it tries to use preallocation first, then falls back @@ -4494,13 +4639,13 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { - int freed; struct ext4_allocation_context *ac = NULL; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; + u64 seq; might_sleep(); sb = ar->inode->i_sb; @@ -4525,6 +4670,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ar->len = ar->len >> 1; } if (!ar->len) { + ext4_mb_show_pa(sb); *errp = -ENOSPC; return 0; } @@ -4562,26 +4708,32 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, } ac->ac_op = EXT4_MB_HISTORY_PREALLOC; + seq = *this_cpu_ptr(&discard_pa_seq); if (!ext4_mb_use_preallocated(ac)) { ac->ac_op = EXT4_MB_HISTORY_ALLOC; ext4_mb_normalize_request(ac, ar); + + *errp = ext4_mb_pa_alloc(ac); + if (*errp) + goto errout; repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); - if (*errp) - goto discard_and_exit; - - /* as we've just preallocated more space than - * user requested originally, we store allocated - * space in a special descriptor */ - if (ac->ac_status == AC_STATUS_FOUND && - ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) - *errp = ext4_mb_new_preallocation(ac); + /* + * pa allocated above is added to grp->bb_prealloc_list only + * when we were able to allocate some block i.e. when + * ac->ac_status == AC_STATUS_FOUND. + * And error from above mean ac->ac_status != AC_STATUS_FOUND + * So we have to free this pa here itself. + */ if (*errp) { - discard_and_exit: + ext4_mb_pa_free(ac); ext4_discard_allocated_blocks(ac); goto errout; } + if (ac->ac_status == AC_STATUS_FOUND && + ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) + ext4_mb_pa_free(ac); } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); @@ -4593,9 +4745,13 @@ repeat: ar->len = ac->ac_b_ex.fe_len; } } else { - freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); - if (freed) + if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) goto repeat; + /* + * If block allocation fails then the pa allocated above + * needs to be freed here itself. + */ + ext4_mb_pa_free(ac); *errp = -ENOSPC; } diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 88c98f17e3d9..6b4d17c2935d 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -24,19 +24,15 @@ #include "ext4.h" /* + * mb_debug() dynamic printk msgs could be used to debug mballoc code. */ #ifdef CONFIG_EXT4_DEBUG -extern ushort ext4_mballoc_debug; - -#define mb_debug(n, fmt, ...) \ -do { \ - if ((n) <= ext4_mballoc_debug) { \ - printk(KERN_DEBUG "(%s, %d): %s: " fmt, \ - __FILE__, __LINE__, __func__, ##__VA_ARGS__); \ - } \ -} while (0) +#define mb_debug(sb, fmt, ...) \ + pr_debug("[%s/%d] EXT4-fs (%s): (%s, %d): %s: " fmt, \ + current->comm, task_pid_nr(current), sb->s_id, \ + __FILE__, __LINE__, __func__, ##__VA_ARGS__) #else -#define mb_debug(n, fmt, ...) no_printk(fmt, ##__VA_ARGS__) +#define mb_debug(sb, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #endif #define EXT4_MB_HISTORY_ALLOC 1 /* allocation */ diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index fb6520f37135..c5e3fc998211 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -287,7 +287,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data) static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, struct inode *tmp_inode) { - int retval; + int retval, retval2 = 0; __le32 i_data[3]; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode); @@ -342,7 +342,9 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, * i_blocks when freeing the indirect meta-data blocks */ retval = free_ind_block(handle, inode, i_data); - ext4_mark_inode_dirty(handle, inode); + retval2 = ext4_mark_inode_dirty(handle, inode); + if (unlikely(retval2 && !retval)) + retval = retval2; err_out: return retval; @@ -601,7 +603,7 @@ int ext4_ind_migrate(struct inode *inode) ext4_lblk_t start, end; ext4_fsblk_t blk; handle_t *handle; - int ret; + int ret, ret2 = 0; if (!ext4_has_feature_extents(inode->i_sb) || (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) @@ -655,7 +657,9 @@ int ext4_ind_migrate(struct inode *inode) memset(ei->i_data, 0, sizeof(ei->i_data)); for (i = start; i <= end; i++) ei->i_data[i] = cpu_to_le32(blk++); - ext4_mark_inode_dirty(handle, inode); + ret2 = ext4_mark_inode_dirty(handle, inode); + if (unlikely(ret2 && !ret)) + ret = ret2; errout: ext4_journal_stop(handle); up_write(&EXT4_I(inode)->i_data_sem); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a8aca4772aaa..56738b538ddf 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1993,7 +1993,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, { unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; - int err; + int err, err2; if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); @@ -2028,12 +2028,12 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, dir->i_mtime = dir->i_ctime = current_time(dir); ext4_update_dx_flag(dir); inode_inc_iversion(dir); - ext4_mark_inode_dirty(handle, dir); + err2 = ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirblock(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); - return 0; + return err ? 
err : err2; } /* @@ -2223,7 +2223,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, } ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; - ext4_mark_inode_dirty(handle, dir); + retval = ext4_mark_inode_dirty(handle, dir); + if (unlikely(retval)) + goto out; } blocks = dir->i_size >> sb->s_blocksize_bits; for (block = 0; block < blocks; block++) { @@ -2576,12 +2578,12 @@ static int ext4_add_nondir(handle_t *handle, struct inode *inode = *inodep; int err = ext4_add_entry(handle, dentry, inode); if (!err) { - ext4_mark_inode_dirty(handle, inode); + err = ext4_mark_inode_dirty(handle, inode); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); d_instantiate_new(dentry, inode); *inodep = NULL; - return 0; + return err; } drop_nlink(inode); ext4_orphan_add(handle, inode); @@ -2775,7 +2777,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { handle_t *handle; struct inode *inode; - int err, credits, retries = 0; + int err, err2 = 0, credits, retries = 0; if (EXT4_DIR_LINK_MAX(dir)) return -EMLINK; @@ -2808,7 +2810,9 @@ out_clear_inode: clear_nlink(inode); ext4_orphan_add(handle, inode); unlock_new_inode(inode); - ext4_mark_inode_dirty(handle, inode); + err2 = ext4_mark_inode_dirty(handle, inode); + if (unlikely(err2)) + err = err2; ext4_journal_stop(handle); iput(inode); goto out_retry; @@ -3148,10 +3152,12 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) inode->i_size = 0; ext4_orphan_add(handle, inode); inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + retval = ext4_mark_inode_dirty(handle, inode); + if (retval) + goto end_rmdir; ext4_dec_count(handle, dir); ext4_update_dx_flag(dir); - ext4_mark_inode_dirty(handle, dir); + retval = ext4_mark_inode_dirty(handle, dir); #ifdef CONFIG_UNICODE /* VFS negative dentries are incompatible with Encoding and @@ -3221,7 +3227,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) goto end_unlink; dir->i_ctime = dir->i_mtime = current_time(dir); ext4_update_dx_flag(dir); - ext4_mark_inode_dirty(handle, dir); + retval = ext4_mark_inode_dirty(handle, dir); + if (retval) + goto end_unlink; if (inode->i_nlink == 0) ext4_warning_inode(inode, "Deleting file '%.*s' with no links", dentry->d_name.len, dentry->d_name.name); @@ -3230,7 +3238,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (!inode->i_nlink) ext4_orphan_add(handle, inode); inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + retval = ext4_mark_inode_dirty(handle, inode); #ifdef CONFIG_UNICODE /* VFS negative dentries are incompatible with Encoding and @@ -3419,7 +3427,7 @@ retry: err = ext4_add_entry(handle, dentry, inode); if (!err) { - ext4_mark_inode_dirty(handle, inode); + err = ext4_mark_inode_dirty(handle, inode); /* this can happen only for tmpfile being * linked the first time */ @@ -3531,7 +3539,7 @@ static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent, static int ext4_setent(handle_t *handle, struct ext4_renament *ent, unsigned ino, unsigned file_type) { - int retval; + int retval, retval2; BUFFER_TRACE(ent->bh, "get write access"); retval = ext4_journal_get_write_access(handle, ent->bh); @@ -3543,19 +3551,19 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent, inode_inc_iversion(ent->dir); ent->dir->i_ctime = ent->dir->i_mtime = current_time(ent->dir); - ext4_mark_inode_dirty(handle, ent->dir); + retval = ext4_mark_inode_dirty(handle, ent->dir); 
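Several hunks in namei.c around this point follow the same conversion: the previously ignored return of ext4_mark_inode_dirty() is either checked directly or captured in a second variable and surfaced only when no earlier error is pending. Reduced to plain C with placeholder helpers (not the real ext4 functions), the pattern is simply:

/* Sketch of "capture the secondary error, keep the first one".
 * do_update() and mark_dirty() stand in for the real helpers. */
#include <stdio.h>

static int do_update(void)  { return 0; }	/* primary change, succeeds here */
static int mark_dirty(void) { return -5; }	/* secondary step, pretend it fails with -EIO */

static int op(void)
{
	int err, err2;

	err = do_update();
	err2 = mark_dirty();
	if (err2 && !err)	/* report the new failure only if nothing failed earlier */
		err = err2;
	return err;
}

int main(void)
{
	printf("op() = %d\n", op());
	return 0;
}

ext4_setent() above makes the opposite choice deliberately: the dirblock write error is returned immediately and wins over the inode-dirty error kept in retval.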
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata"); if (!ent->inlined) { - retval = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh); - if (unlikely(retval)) { - ext4_std_error(ent->dir->i_sb, retval); - return retval; + retval2 = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh); + if (unlikely(retval2)) { + ext4_std_error(ent->dir->i_sb, retval2); + return retval2; } } brelse(ent->bh); ent->bh = NULL; - return 0; + return retval; } static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, @@ -3790,7 +3798,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, EXT4_FT_CHRDEV); if (retval) goto end_rename; - ext4_mark_inode_dirty(handle, whiteout); + retval = ext4_mark_inode_dirty(handle, whiteout); + if (unlikely(retval)) + goto end_rename; } if (!new.bh) { retval = ext4_add_entry(handle, new.dentry, old.inode); @@ -3811,7 +3821,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, * rename. */ old.inode->i_ctime = current_time(old.inode); - ext4_mark_inode_dirty(handle, old.inode); + retval = ext4_mark_inode_dirty(handle, old.inode); + if (unlikely(retval)) + goto end_rename; if (!whiteout) { /* @@ -3840,12 +3852,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, } else { ext4_inc_count(handle, new.dir); ext4_update_dx_flag(new.dir); - ext4_mark_inode_dirty(handle, new.dir); + retval = ext4_mark_inode_dirty(handle, new.dir); + if (unlikely(retval)) + goto end_rename; } } - ext4_mark_inode_dirty(handle, old.dir); + retval = ext4_mark_inode_dirty(handle, old.dir); + if (unlikely(retval)) + goto end_rename; if (new.inode) { - ext4_mark_inode_dirty(handle, new.inode); + retval = ext4_mark_inode_dirty(handle, new.inode); + if (unlikely(retval)) + goto end_rename; if (!new.inode->i_nlink) ext4_orphan_add(handle, new.inode); } @@ -3979,8 +3997,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, ctime = current_time(old.inode); old.inode->i_ctime = ctime; new.inode->i_ctime = ctime; - ext4_mark_inode_dirty(handle, old.inode); - ext4_mark_inode_dirty(handle, new.inode); + retval = ext4_mark_inode_dirty(handle, old.inode); + if (unlikely(retval)) + goto end_rename; + retval = ext4_mark_inode_dirty(handle, new.inode); + if (unlikely(retval)) + goto end_rename; if (old.dir_bh) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index c1769afbf799..5761e9961682 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -7,8 +7,8 @@ * * This was originally taken from fs/mpage.c * - * The intent is the ext4_mpage_readpages() function here is intended - * to replace mpage_readpages() in the general case, not just for + * The ext4_mpage_readpages() function here is intended to + * replace mpage_readahead() in the general case, not just for * encrypted files. It has some limitations (see below), where it * will fall back to read_block_full_page(), but these limitations * should only be hit when page_size != block_size. 
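The hunks that follow convert ext4_mpage_readpages() to take a struct readahead_control: the function no longer walks an LRU list and inserts pages itself, it pulls the pages the core has already added to the page cache via readahead_page() and sizes the batch with readahead_count(). A deliberately simplified userspace caricature of that consumer-side interface (the struct and helpers below are invented, not the kernel API) is:

/* Userspace caricature of a readahead batch descriptor: the caller
 * hands over the batch, the consumer just pulls pages from it. */
#include <stdio.h>

struct toy_rac {
	int *pages;		/* stand-in for the batch's page pointers */
	unsigned int nr;	/* pages left in the batch */
	unsigned int idx;	/* next page to hand out */
};

static unsigned int toy_count(const struct toy_rac *rac) { return rac->nr; }

static int *toy_next_page(struct toy_rac *rac)
{
	if (!rac->nr)
		return NULL;
	rac->nr--;
	return &rac->pages[rac->idx++];
}

static void toy_readahead(struct toy_rac *rac)
{
	int *page;

	printf("batch of %u pages\n", toy_count(rac));
	while ((page = toy_next_page(rac)) != NULL)
		printf("submit read for page %d\n", *page);	/* build/submit the bio here */
}

int main(void)
{
	int pages[] = { 10, 11, 12, 13 };
	struct toy_rac rac = { pages, 4, 0 };

	toy_readahead(&rac);
	return 0;
}

The payoff visible in the diff is that add_to_page_cache_lru(), the lru_to_page() walk and the closing BUG_ON(pages && !list_empty(pages)) all disappear from the filesystem side.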
@@ -221,14 +221,12 @@ static inline loff_t ext4_readpage_limit(struct inode *inode) return i_size_read(inode); } -int ext4_mpage_readpages(struct address_space *mapping, - struct list_head *pages, struct page *page, - unsigned nr_pages, bool is_readahead) +int ext4_mpage_readpages(struct inode *inode, + struct readahead_control *rac, struct page *page) { struct bio *bio = NULL; sector_t last_block_in_bio = 0; - struct inode *inode = mapping->host; const unsigned blkbits = inode->i_blkbits; const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; @@ -241,6 +239,7 @@ int ext4_mpage_readpages(struct address_space *mapping, int length; unsigned relative_block = 0; struct ext4_map_blocks map; + unsigned int nr_pages = rac ? readahead_count(rac) : 1; map.m_pblk = 0; map.m_lblk = 0; @@ -251,14 +250,9 @@ int ext4_mpage_readpages(struct address_space *mapping, int fully_mapped = 1; unsigned first_hole = blocks_per_page; - if (pages) { - page = lru_to_page(pages); - + if (rac) { + page = readahead_page(rac); prefetchw(&page->flags); - list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, page->index, - readahead_gfp_mask(mapping))) - goto next_page; } if (page_has_buffers(page)) @@ -381,7 +375,7 @@ int ext4_mpage_readpages(struct address_space *mapping, bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); bio->bi_end_io = mpage_end_io; bio_set_op_attrs(bio, REQ_OP_READ, - is_readahead ? REQ_RAHEAD : 0); + rac ? REQ_RAHEAD : 0); } length = first_hole << blkbits; @@ -406,10 +400,9 @@ int ext4_mpage_readpages(struct address_space *mapping, else unlock_page(page); next_page: - if (pages) + if (rac) put_page(page); } - BUG_ON(pages && !list_empty(pages)); if (bio) submit_bio(bio); return 0; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index bf5fcb477f66..a29e8ea1a7ab 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1106,6 +1106,7 @@ static void ext4_put_super(struct super_block *sb) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); fs_put_dax(sbi->s_daxdev); + fscrypt_free_dummy_context(&sbi->s_dummy_enc_ctx); #ifdef CONFIG_UNICODE utf8_unload(sbi->s_encoding); #endif @@ -1389,9 +1390,10 @@ retry: return res; } -static bool ext4_dummy_context(struct inode *inode) +static const union fscrypt_context * +ext4_get_dummy_context(struct super_block *sb) { - return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb)); + return EXT4_SB(sb)->s_dummy_enc_ctx.ctx; } static bool ext4_has_stable_inodes(struct super_block *sb) @@ -1410,7 +1412,7 @@ static const struct fscrypt_operations ext4_cryptops = { .key_prefix = "ext4:", .get_context = ext4_get_context, .set_context = ext4_set_context, - .dummy_context = ext4_dummy_context, + .get_dummy_context = ext4_get_dummy_context, .empty_dir = ext4_empty_dir, .max_namelen = EXT4_NAME_LEN, .has_stable_inodes = ext4_has_stable_inodes, @@ -1605,6 +1607,7 @@ static const match_table_t tokens = { {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, + {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ @@ -1816,7 +1819,7 @@ static const struct mount_opts { {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, - {Opt_test_dummy_encryption, 0, MOPT_GTE0}, + {Opt_test_dummy_encryption, 0, MOPT_STRING}, {Opt_nombcache, 
EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, {Opt_err, 0, 0} }; @@ -1851,6 +1854,48 @@ static int ext4_sb_read_encoding(const struct ext4_super_block *es, } #endif +static int ext4_set_test_dummy_encryption(struct super_block *sb, + const char *opt, + const substring_t *arg, + bool is_remount) +{ +#ifdef CONFIG_FS_ENCRYPTION + struct ext4_sb_info *sbi = EXT4_SB(sb); + int err; + + /* + * This mount option is just for testing, and it's not worthwhile to + * implement the extra complexity (e.g. RCU protection) that would be + * needed to allow it to be set or changed during remount. We do allow + * it to be specified during remount, but only if there is no change. + */ + if (is_remount && !sbi->s_dummy_enc_ctx.ctx) { + ext4_msg(sb, KERN_WARNING, + "Can't set test_dummy_encryption on remount"); + return -1; + } + err = fscrypt_set_test_dummy_encryption(sb, arg, &sbi->s_dummy_enc_ctx); + if (err) { + if (err == -EEXIST) + ext4_msg(sb, KERN_WARNING, + "Can't change test_dummy_encryption on remount"); + else if (err == -EINVAL) + ext4_msg(sb, KERN_WARNING, + "Value of option \"%s\" is unrecognized", opt); + else + ext4_msg(sb, KERN_WARNING, + "Error processing option \"%s\" [%d]", + opt, err); + return -1; + } + ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); +#else + ext4_msg(sb, KERN_WARNING, + "Test dummy encryption mount option ignored"); +#endif + return 1; +} + static int handle_mount_opt(struct super_block *sb, char *opt, int token, substring_t *args, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) @@ -2047,14 +2092,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); } else if (token == Opt_test_dummy_encryption) { -#ifdef CONFIG_FS_ENCRYPTION - sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION; - ext4_msg(sb, KERN_WARNING, - "Test dummy encryption mode enabled"); -#else - ext4_msg(sb, KERN_WARNING, - "Test dummy encryption mount option ignored"); -#endif + return ext4_set_test_dummy_encryption(sb, opt, &args[0], + is_remount); } else if (m->flags & MOPT_DATAJ) { if (is_remount) { if (!sbi->s_journal) @@ -2311,8 +2350,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); if (test_opt(sb, DATA_ERR_ABORT)) SEQ_OPTS_PUTS("data_err=abort"); - if (DUMMY_ENCRYPTION_ENABLED(sbi)) - SEQ_OPTS_PUTS("test_dummy_encryption"); + + fscrypt_show_test_dummy_encryption(seq, sep, sb); ext4_show_quota_options(seq, sb); return 0; @@ -3679,7 +3718,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) int blocksize, clustersize; unsigned int db_count; unsigned int i; - int needs_recovery, has_huge_files, has_bigalloc; + int needs_recovery, has_huge_files; __u64 blocks_count; int err = 0; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; @@ -3971,17 +4010,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n"); + /* can't mount with both data=journal and dioread_nolock. 
*/ clear_opt(sb, DIOREAD_NOLOCK); if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); goto failed_mount; } - if (test_opt(sb, DIOREAD_NOLOCK)) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "both data=journal and dioread_nolock"); - goto failed_mount; - } if (test_opt(sb, DAX)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); @@ -4198,8 +4233,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* Handle clustersize */ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); - has_bigalloc = ext4_has_feature_bigalloc(sb); - if (has_bigalloc) { + if (ext4_has_feature_bigalloc(sb)) { if (clustersize < blocksize) { ext4_msg(sb, KERN_ERR, "cluster size (%d) smaller than " @@ -4780,6 +4814,7 @@ failed_mount: for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(get_qf_name(sb, sbi, i)); #endif + fscrypt_free_dummy_context(&sbi->s_dummy_enc_ctx); ext4_blkdev_remove(sbi); brelse(bh); out_fail: @@ -5256,7 +5291,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait) needs_barrier = true; if (needs_barrier) { int err; - err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); + err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL); if (!ret) ret = err; } @@ -5885,7 +5920,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL; inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, S_NOATIME | S_IMMUTABLE); - ext4_mark_inode_dirty(handle, inode); + err = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); unlock_inode: inode_unlock(inode); @@ -5987,12 +6022,14 @@ static int ext4_quota_off(struct super_block *sb, int type) * this is not a hard failure and quotas are already disabled. */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); - if (IS_ERR(handle)) + if (IS_ERR(handle)) { + err = PTR_ERR(handle); goto out_unlock; + } EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL); inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); inode->i_mtime = inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); + err = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out_unlock: inode_unlock(inode); @@ -6050,7 +6087,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); - int err, offset = off & (sb->s_blocksize - 1); + int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1); int retries = 0; struct buffer_head *bh; handle_t *handle = journal_current_handle(); @@ -6098,9 +6135,11 @@ out: if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; - ext4_mark_inode_dirty(handle, inode); + err2 = ext4_mark_inode_dirty(handle, inode); + if (unlikely(err2 && !err)) + err = err2; } - return len; + return err ? 
err : len; } #endif diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 04bfaf63752c..6c9fc9e21c13 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -293,6 +293,7 @@ EXT4_ATTR_FEATURE(batched_discard); EXT4_ATTR_FEATURE(meta_bg_resize); #ifdef CONFIG_FS_ENCRYPTION EXT4_ATTR_FEATURE(encryption); +EXT4_ATTR_FEATURE(test_dummy_encryption_v2); #endif #ifdef CONFIG_UNICODE EXT4_ATTR_FEATURE(casefold); @@ -308,6 +309,7 @@ static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(meta_bg_resize), #ifdef CONFIG_FS_ENCRYPTION ATTR_LIST(encryption), + ATTR_LIST(test_dummy_encryption_v2), #endif #ifdef CONFIG_UNICODE ATTR_LIST(casefold), diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index dc5ec724d889..dec1244dd062 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -342,37 +342,6 @@ static int ext4_get_verity_descriptor(struct inode *inode, void *buf, return desc_size; } -/* - * Prefetch some pages from the file's Merkle tree. - * - * This is basically a stripped-down version of __do_page_cache_readahead() - * which works on pages past i_size. - */ -static void ext4_merkle_tree_readahead(struct address_space *mapping, - pgoff_t start_index, unsigned long count) -{ - LIST_HEAD(pages); - unsigned int nr_pages = 0; - struct page *page; - pgoff_t index; - struct blk_plug plug; - - for (index = start_index; index < start_index + count; index++) { - page = xa_load(&mapping->i_pages, index); - if (!page || xa_is_value(page)) { - page = __page_cache_alloc(readahead_gfp_mask(mapping)); - if (!page) - break; - page->index = index; - list_add(&page->lru, &pages); - nr_pages++; - } - } - blk_start_plug(&plug); - ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true); - blk_finish_plug(&plug); -} - static struct page *ext4_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) @@ -386,8 +355,8 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode, if (page) put_page(page); else if (num_ra_pages > 1) - ext4_merkle_tree_readahead(inode->i_mapping, index, - num_ra_pages); + page_cache_readahead_unbounded(inode->i_mapping, NULL, + index, num_ra_pages, 0); page = read_mapping_page(inode->i_mapping, index, NULL); } return page; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 21df43a25328..9b29a40738ac 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1327,7 +1327,7 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode, int blocksize = ea_inode->i_sb->s_blocksize; int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits; int csize, wsize = 0; - int ret = 0; + int ret = 0, ret2 = 0; int retries = 0; retry: @@ -1385,7 +1385,9 @@ retry: ext4_update_i_disksize(ea_inode, wsize); inode_unlock(ea_inode); - ext4_mark_inode_dirty(handle, ea_inode); + ret2 = ext4_mark_inode_dirty(handle, ea_inode); + if (unlikely(ret2 && !ret)) + ret = ret2; out: brelse(bh); @@ -1800,8 +1802,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. 
*/ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); - if (IS_ERR(bs->bh)) - return PTR_ERR(bs->bh); + if (IS_ERR(bs->bh)) { + error = PTR_ERR(bs->bh); + bs->bh = NULL; + return error; + } ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index cdf2f626bea7..072fb19555a8 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -19,6 +19,7 @@ #include <linux/uio.h> #include <linux/cleancache.h> #include <linux/sched/signal.h> +#include <linux/fiemap.h> #include "f2fs.h" #include "node.h" @@ -1824,7 +1825,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return ret; } - ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR); + ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR); if (ret) return ret; @@ -2177,13 +2178,11 @@ out: * use ->readpage() or do the necessary surgery to decouple ->readpages() * from read-ahead. */ -int f2fs_mpage_readpages(struct address_space *mapping, - struct list_head *pages, struct page *page, - unsigned nr_pages, bool is_readahead) +static int f2fs_mpage_readpages(struct inode *inode, + struct readahead_control *rac, struct page *page) { struct bio *bio = NULL; sector_t last_block_in_bio = 0; - struct inode *inode = mapping->host; struct f2fs_map_blocks map; #ifdef CONFIG_F2FS_FS_COMPRESSION struct compress_ctx cc = { @@ -2197,6 +2196,7 @@ int f2fs_mpage_readpages(struct address_space *mapping, .nr_cpages = 0, }; #endif + unsigned nr_pages = rac ? readahead_count(rac) : 1; unsigned max_nr_pages = nr_pages; int ret = 0; @@ -2210,15 +2210,9 @@ int f2fs_mpage_readpages(struct address_space *mapping, map.m_may_create = false; for (; nr_pages; nr_pages--) { - if (pages) { - page = list_last_entry(pages, struct page, lru); - + if (rac) { + page = readahead_page(rac); prefetchw(&page->flags); - list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, - page_index(page), - readahead_gfp_mask(mapping))) - goto next_page; } #ifdef CONFIG_F2FS_FS_COMPRESSION @@ -2228,7 +2222,7 @@ int f2fs_mpage_readpages(struct address_space *mapping, ret = f2fs_read_multi_pages(&cc, &bio, max_nr_pages, &last_block_in_bio, - is_readahead, false); + rac != NULL, false); f2fs_destroy_compress_ctx(&cc); if (ret) goto set_error_page; @@ -2251,7 +2245,7 @@ read_single_page: #endif ret = f2fs_read_single_page(inode, page, max_nr_pages, &map, - &bio, &last_block_in_bio, is_readahead); + &bio, &last_block_in_bio, rac); if (ret) { #ifdef CONFIG_F2FS_FS_COMPRESSION set_error_page: @@ -2260,8 +2254,10 @@ set_error_page: zero_user_segment(page, 0, PAGE_SIZE); unlock_page(page); } +#ifdef CONFIG_F2FS_FS_COMPRESSION next_page: - if (pages) +#endif + if (rac) put_page(page); #ifdef CONFIG_F2FS_FS_COMPRESSION @@ -2271,16 +2267,15 @@ next_page: ret = f2fs_read_multi_pages(&cc, &bio, max_nr_pages, &last_block_in_bio, - is_readahead, false); + rac != NULL, false); f2fs_destroy_compress_ctx(&cc); } } #endif } - BUG_ON(pages && !list_empty(pages)); if (bio) __submit_bio(F2FS_I_SB(inode), bio, DATA); - return pages ? 
0 : ret; + return ret; } static int f2fs_read_data_page(struct file *file, struct page *page) @@ -2299,28 +2294,24 @@ static int f2fs_read_data_page(struct file *file, struct page *page) if (f2fs_has_inline_data(inode)) ret = f2fs_read_inline_data(inode, page); if (ret == -EAGAIN) - ret = f2fs_mpage_readpages(page_file_mapping(page), - NULL, page, 1, false); + ret = f2fs_mpage_readpages(inode, NULL, page); return ret; } -static int f2fs_read_data_pages(struct file *file, - struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void f2fs_readahead(struct readahead_control *rac) { - struct inode *inode = mapping->host; - struct page *page = list_last_entry(pages, struct page, lru); + struct inode *inode = rac->mapping->host; - trace_f2fs_readpages(inode, page, nr_pages); + trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac)); if (!f2fs_is_compress_backend_ready(inode)) - return 0; + return; /* If the file has inline data, skip readpages */ if (f2fs_has_inline_data(inode)) - return 0; + return; - return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true); + f2fs_mpage_readpages(inode, rac, NULL); } int f2fs_encrypt_one_page(struct f2fs_io_info *fio) @@ -3805,7 +3796,7 @@ static void f2fs_swap_deactivate(struct file *file) const struct address_space_operations f2fs_dblock_aops = { .readpage = f2fs_read_data_page, - .readpages = f2fs_read_data_pages, + .readahead = f2fs_readahead, .writepage = f2fs_write_data_page, .writepages = f2fs_write_data_pages, .write_begin = f2fs_write_begin, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index ba470d5687fe..5c0149d2f46a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -138,7 +138,7 @@ struct f2fs_mount_info { int fsync_mode; /* fsync policy */ int fs_mode; /* fs mode: LFS or ADAPTIVE */ int bggc_mode; /* bggc mode: off, on or sync */ - bool test_dummy_encryption; /* test dummy encryption */ + struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */ block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ @@ -1259,7 +1259,7 @@ enum fsync_mode { #ifdef CONFIG_FS_ENCRYPTION #define DUMMY_ENCRYPTION_ENABLED(sbi) \ - (unlikely(F2FS_OPTION(sbi).test_dummy_encryption)) + (unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL)) #else #define DUMMY_ENCRYPTION_ENABLED(sbi) (0) #endif @@ -3051,19 +3051,12 @@ static inline void f2fs_set_page_private(struct page *page, if (PagePrivate(page)) return; - get_page(page); - SetPagePrivate(page); - set_page_private(page, data); + attach_page_private(page, (void *)data); } static inline void f2fs_clear_page_private(struct page *page) { - if (!PagePrivate(page)) - return; - - set_page_private(page, 0); - ClearPagePrivate(page); - f2fs_put_page(page, 0); + detach_page_private(page); } /* @@ -3373,9 +3366,6 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn); int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from); int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); -int f2fs_mpage_readpages(struct address_space *mapping, - struct list_head *pages, struct page *page, - unsigned nr_pages, bool is_readahead); struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, int op_flags, bool for_write); struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index); diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 5bc4dcd8fc03..8c4ea5003ef8 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -12,7 +12,6 @@ 
#include <linux/types.h> #include <linux/fs.h> #include <linux/f2fs_fs.h> -#include <linux/cryptohash.h> #include <linux/pagemap.h> #include <linux/unicode.h> diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 4167e5408151..9686ffea177e 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -8,6 +8,7 @@ #include <linux/fs.h> #include <linux/f2fs_fs.h> +#include <linux/fiemap.h> #include "f2fs.h" #include "node.h" diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index f2dfc21c6abb..8a9955902d84 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -202,6 +202,7 @@ static match_table_t f2fs_tokens = { {Opt_whint, "whint_mode=%s"}, {Opt_alloc, "alloc_mode=%s"}, {Opt_fsync, "fsync_mode=%s"}, + {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_checkpoint_disable, "checkpoint=disable"}, {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, @@ -394,7 +395,52 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) } #endif -static int parse_options(struct super_block *sb, char *options) +static int f2fs_set_test_dummy_encryption(struct super_block *sb, + const char *opt, + const substring_t *arg, + bool is_remount) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); +#ifdef CONFIG_FS_ENCRYPTION + int err; + + if (!f2fs_sb_has_encrypt(sbi)) { + f2fs_err(sbi, "Encrypt feature is off"); + return -EINVAL; + } + + /* + * This mount option is just for testing, and it's not worthwhile to + * implement the extra complexity (e.g. RCU protection) that would be + * needed to allow it to be set or changed during remount. We do allow + * it to be specified during remount, but only if there is no change. + */ + if (is_remount && !F2FS_OPTION(sbi).dummy_enc_ctx.ctx) { + f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); + return -EINVAL; + } + err = fscrypt_set_test_dummy_encryption( + sb, arg, &F2FS_OPTION(sbi).dummy_enc_ctx); + if (err) { + if (err == -EEXIST) + f2fs_warn(sbi, + "Can't change test_dummy_encryption on remount"); + else if (err == -EINVAL) + f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", + opt); + else + f2fs_warn(sbi, "Error processing option \"%s\" [%d]", + opt, err); + return -EINVAL; + } + f2fs_warn(sbi, "Test dummy encryption mode enabled"); +#else + f2fs_warn(sbi, "Test dummy encryption mount option ignored"); +#endif + return 0; +} + +static int parse_options(struct super_block *sb, char *options, bool is_remount) { struct f2fs_sb_info *sbi = F2FS_SB(sb); substring_t args[MAX_OPT_ARGS]; @@ -403,9 +449,7 @@ static int parse_options(struct super_block *sb, char *options) int arg = 0, ext_cnt; kuid_t uid; kgid_t gid; -#ifdef CONFIG_QUOTA int ret; -#endif if (!options) return 0; @@ -778,17 +822,10 @@ static int parse_options(struct super_block *sb, char *options) kvfree(name); break; case Opt_test_dummy_encryption: -#ifdef CONFIG_FS_ENCRYPTION - if (!f2fs_sb_has_encrypt(sbi)) { - f2fs_err(sbi, "Encrypt feature is off"); - return -EINVAL; - } - - F2FS_OPTION(sbi).test_dummy_encryption = true; - f2fs_info(sbi, "Test dummy encryption mode enabled"); -#else - f2fs_info(sbi, "Test dummy encryption mount option ignored"); -#endif + ret = f2fs_set_test_dummy_encryption(sb, p, &args[0], + is_remount); + if (ret) + return ret; break; case Opt_checkpoint_disable_cap_perc: if (args->from && match_int(args, &arg)) @@ -1213,6 +1250,7 @@ static void f2fs_put_super(struct super_block *sb) for (i = 0; i < MAXQUOTAS; i++) kvfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif + 
fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx); destroy_percpu_info(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) kvfree(sbi->write_io[i]); @@ -1543,10 +1581,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_printf(seq, ",whint_mode=%s", "user-based"); else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) seq_printf(seq, ",whint_mode=%s", "fs-based"); -#ifdef CONFIG_FS_ENCRYPTION - if (F2FS_OPTION(sbi).test_dummy_encryption) - seq_puts(seq, ",test_dummy_encryption"); -#endif + + fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) seq_printf(seq, ",alloc_mode=%s", "default"); @@ -1575,7 +1611,6 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; - F2FS_OPTION(sbi).test_dummy_encryption = false; F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; @@ -1734,7 +1769,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) default_options(sbi); /* parse mount options */ - err = parse_options(sb, data); + err = parse_options(sb, data, true); if (err) goto restore_opts; checkpoint_changed = @@ -2410,9 +2445,10 @@ static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, ctx, len, fs_data, XATTR_CREATE); } -static bool f2fs_dummy_context(struct inode *inode) +static const union fscrypt_context * +f2fs_get_dummy_context(struct super_block *sb) { - return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode)); + return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_ctx.ctx; } static bool f2fs_has_stable_inodes(struct super_block *sb) @@ -2431,7 +2467,7 @@ static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, .set_context = f2fs_set_context, - .dummy_context = f2fs_dummy_context, + .get_dummy_context = f2fs_get_dummy_context, .empty_dir = f2fs_empty_dir, .max_namelen = F2FS_NAME_LEN, .has_stable_inodes = f2fs_has_stable_inodes, @@ -3366,7 +3402,7 @@ try_onemore: goto free_sb_buf; } - err = parse_options(sb, options); + err = parse_options(sb, options, false); if (err) goto free_options; @@ -3769,6 +3805,7 @@ free_options: for (i = 0; i < MAXQUOTAS; i++) kvfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif + fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx); kvfree(options); free_sb_buf: kvfree(raw_super); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e3bbbef9b4f0..3162f46b3c9b 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -446,6 +446,7 @@ enum feat_id { FEAT_SB_CHECKSUM, FEAT_CASEFOLD, FEAT_COMPRESSION, + FEAT_TEST_DUMMY_ENCRYPTION_V2, }; static ssize_t f2fs_feature_show(struct f2fs_attr *a, @@ -466,6 +467,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a, case FEAT_SB_CHECKSUM: case FEAT_CASEFOLD: case FEAT_COMPRESSION: + case FEAT_TEST_DUMMY_ENCRYPTION_V2: return sprintf(buf, "supported\n"); } return 0; @@ -563,6 +565,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks); #ifdef CONFIG_FS_ENCRYPTION F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO); +F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2, FEAT_TEST_DUMMY_ENCRYPTION_V2); #endif #ifdef CONFIG_BLK_DEV_ZONED F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED); @@ -647,6 +650,7 @@ ATTRIBUTE_GROUPS(f2fs); static struct attribute *f2fs_feat_attrs[] = { #ifdef CONFIG_FS_ENCRYPTION 
ATTR_LIST(encryption), + ATTR_LIST(test_dummy_encryption_v2), #endif #ifdef CONFIG_BLK_DEV_ZONED ATTR_LIST(block_zoned), diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index d7d430a6f130..865c9fb774fb 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -222,37 +222,6 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf, return size; } -/* - * Prefetch some pages from the file's Merkle tree. - * - * This is basically a stripped-down version of __do_page_cache_readahead() - * which works on pages past i_size. - */ -static void f2fs_merkle_tree_readahead(struct address_space *mapping, - pgoff_t start_index, unsigned long count) -{ - LIST_HEAD(pages); - unsigned int nr_pages = 0; - struct page *page; - pgoff_t index; - struct blk_plug plug; - - for (index = start_index; index < start_index + count; index++) { - page = xa_load(&mapping->i_pages, index); - if (!page || xa_is_value(page)) { - page = __page_cache_alloc(readahead_gfp_mask(mapping)); - if (!page) - break; - page->index = index; - list_add(&page->lru, &pages); - nr_pages++; - } - } - blk_start_plug(&plug); - f2fs_mpage_readpages(mapping, &pages, NULL, nr_pages, true); - blk_finish_plug(&plug); -} - static struct page *f2fs_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) @@ -266,8 +235,8 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode, if (page) put_page(page); else if (num_ra_pages > 1) - f2fs_merkle_tree_readahead(inode->i_mapping, index, - num_ra_pages); + page_cache_readahead_unbounded(inode->i_mapping, NULL, + index, num_ra_pages, 0); page = read_mapping_page(inode->i_mapping, index, NULL); } return page; diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig index 718163d0c621..ca31993dcb47 100644 --- a/fs/fat/Kconfig +++ b/fs/fat/Kconfig @@ -69,7 +69,7 @@ config VFAT_FS The VFAT support enlarges your kernel by about 10 KB and it only works if you said Y to the "DOS FAT fs support" above. Please read - the file <file:Documentation/filesystems/vfat.txt> for details. If + the file <file:Documentation/filesystems/vfat.rst> for details. If unsure, say Y. To compile this as a module, choose M here: the module will be called @@ -82,7 +82,7 @@ config FAT_DEFAULT_CODEPAGE help This option should be set to the codepage of your FAT filesystems. It can be overridden with the "codepage" mount option. - See <file:Documentation/filesystems/vfat.txt> for more information. + See <file:Documentation/filesystems/vfat.rst> for more information. config FAT_DEFAULT_IOCHARSET string "Default iocharset for FAT" @@ -96,7 +96,7 @@ config FAT_DEFAULT_IOCHARSET Note that "utf8" is not recommended for FAT filesystems. If unsure, you shouldn't set "utf8" here - select the next option instead if you would like to use UTF-8 encoded file names by default. - See <file:Documentation/filesystems/vfat.txt> for more information. + See <file:Documentation/filesystems/vfat.rst> for more information. Enable any character sets you need in File Systems/Native Language Support. @@ -114,4 +114,4 @@ config FAT_DEFAULT_UTF8 Say Y if you use UTF-8 encoding for file names, N otherwise. - See <file:Documentation/filesystems/vfat.txt> for more information. + See <file:Documentation/filesystems/vfat.rst> for more information. 
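The fs/f2fs/verity.c hunk above drops the open-coded Merkle-tree readahead in favour of page_cache_readahead_unbounded(), which can pull in pages past i_size where ordinary readahead stops. As a rough, non-authoritative sketch of the resulting read pattern (the call signature is taken from the hunk; the helper name and the find_get_page_flags() lookup are illustrative assumptions, not quoted from the patch):

#include <linux/pagemap.h>

/* Illustrative sketch only; not part of the patch. */
static struct page *read_tree_page_sketch(struct inode *inode, pgoff_t index,
					  unsigned long num_ra_pages)
{
	struct page *page;

	/* try the page cache first */
	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (!page || !PageUptodate(page)) {
		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			/* readahead beyond EOF; the verity tree lives past i_size */
			page_cache_readahead_unbounded(inode->i_mapping, NULL,
						       index, num_ra_pages, 0);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
}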
diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 054acd9fd033..b4ddf48fa444 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -804,8 +804,6 @@ static long fat_dir_ioctl(struct file *filp, unsigned int cmd, return fat_generic_ioctl(filp, cmd, arg); } - if (!access_ok(d1, sizeof(struct __fat_dirent[2]))) - return -EFAULT; /* * Yes, we don't need this put_user() absolutely. However old * code didn't return the right value. So, app use this value, @@ -844,8 +842,6 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd, return fat_generic_ioctl(filp, cmd, (unsigned long)arg); } - if (!access_ok(d1, sizeof(struct compat_dirent[2]))) - return -EFAULT; /* * Yes, we don't need this put_user() absolutely. However old * code didn't return the right value. So, app use this value, diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 3647c65a0f48..bbfe18c07417 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -632,20 +632,80 @@ error: } EXPORT_SYMBOL_GPL(fat_free_clusters); -/* 128kb is the whole sectors for FAT12 and FAT16 */ -#define FAT_READA_SIZE (128 * 1024) +struct fatent_ra { + sector_t cur; + sector_t limit; + + unsigned int ra_blocks; + sector_t ra_advance; + sector_t ra_next; + sector_t ra_limit; +}; -static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent, - unsigned long reada_blocks) +static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra, + struct fat_entry *fatent, int ent_limit) { - const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops; - sector_t blocknr; - int i, offset; + struct msdos_sb_info *sbi = MSDOS_SB(sb); + const struct fatent_operations *ops = sbi->fatent_ops; + sector_t blocknr, block_end; + int offset; + /* + * This is the sequential read, so ra_pages * 2 (but try to + * align the optimal hardware IO size). + * [BTW, 128kb covers the whole sectors for FAT12 and FAT16] + */ + unsigned long ra_pages = sb->s_bdi->ra_pages; + unsigned int reada_blocks; + if (ra_pages > sb->s_bdi->io_pages) + ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages); + reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1); + + /* Initialize the range for sequential read */ ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); + ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end); + ra->cur = 0; + ra->limit = (block_end + 1) - blocknr; - for (i = 0; i < reada_blocks; i++) - sb_breadahead(sb, blocknr + i); + /* Advancing the window at half size */ + ra->ra_blocks = reada_blocks >> 1; + ra->ra_advance = ra->cur; + ra->ra_next = ra->cur; + ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit); +} + +/* Assuming to be called before reading a new block (increments ->cur). */ +static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra, + struct fat_entry *fatent) +{ + if (ra->ra_next >= ra->ra_limit) + return; + + if (ra->cur >= ra->ra_advance) { + struct msdos_sb_info *sbi = MSDOS_SB(sb); + const struct fatent_operations *ops = sbi->fatent_ops; + struct blk_plug plug; + sector_t blocknr, diff; + int offset; + + ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr); + + diff = blocknr - ra->cur; + blk_start_plug(&plug); + /* + * FIXME: we would want to directly use the bio with + * pages to reduce the number of segments. 
+ */ + for (; ra->ra_next < ra->ra_limit; ra->ra_next++) + sb_breadahead(sb, ra->ra_next + diff); + blk_finish_plug(&plug); + + /* Advance the readahead window */ + ra->ra_advance += ra->ra_blocks; + ra->ra_limit += min_t(sector_t, + ra->ra_blocks, ra->limit - ra->ra_limit); + } + ra->cur++; } int fat_count_free_clusters(struct super_block *sb) @@ -653,27 +713,20 @@ int fat_count_free_clusters(struct super_block *sb) struct msdos_sb_info *sbi = MSDOS_SB(sb); const struct fatent_operations *ops = sbi->fatent_ops; struct fat_entry fatent; - unsigned long reada_blocks, reada_mask, cur_block; + struct fatent_ra fatent_ra; int err = 0, free; lock_fat(sbi); if (sbi->free_clusters != -1 && sbi->free_clus_valid) goto out; - reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits; - reada_mask = reada_blocks - 1; - cur_block = 0; - free = 0; fatent_init(&fatent); fatent_set_entry(&fatent, FAT_START_ENT); + fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster); while (fatent.entry < sbi->max_cluster) { /* readahead of fat blocks */ - if ((cur_block & reada_mask) == 0) { - unsigned long rest = sbi->fat_length - cur_block; - fat_ent_reada(sb, &fatent, min(reada_blocks, rest)); - } - cur_block++; + fat_ent_reada(sb, &fatent_ra, &fatent); err = fat_ent_read_block(sb, &fatent); if (err) @@ -707,9 +760,9 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range) struct msdos_sb_info *sbi = MSDOS_SB(sb); const struct fatent_operations *ops = sbi->fatent_ops; struct fat_entry fatent; + struct fatent_ra fatent_ra; u64 ent_start, ent_end, minlen, trimmed = 0; u32 free = 0; - unsigned long reada_blocks, reada_mask, cur_block = 0; int err = 0; /* @@ -727,19 +780,13 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range) if (ent_end >= sbi->max_cluster) ent_end = sbi->max_cluster - 1; - reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits; - reada_mask = reada_blocks - 1; - fatent_init(&fatent); lock_fat(sbi); fatent_set_entry(&fatent, ent_start); + fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1); while (fatent.entry <= ent_end) { /* readahead of fat blocks */ - if ((cur_block & reada_mask) == 0) { - unsigned long rest = sbi->fat_length - cur_block; - fat_ent_reada(sb, &fatent, min(reada_blocks, rest)); - } - cur_block++; + fat_ent_reada(sb, &fatent_ra, &fatent); err = fat_ent_read_block(sb, &fatent); if (err) diff --git a/fs/fat/file.c b/fs/fat/file.c index bdc4503c00a3..42134c58c87e 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -195,7 +195,7 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) if (err) return err; - return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); } diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 71946da84388..a0cf99debb1e 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -210,10 +210,9 @@ static int fat_readpage(struct file *file, struct page *page) return mpage_readpage(page, fat_get_block); } -static int fat_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void fat_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, fat_get_block); + mpage_readahead(rac, fat_get_block); } static void fat_write_failed(struct address_space *mapping, loff_t to) @@ -344,7 +343,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations fat_aops = { .readpage = fat_readpage, - .readpages = fat_readpages, + .readahead = 
fat_readahead, .writepage = fat_writepage, .writepages = fat_writepages, .write_begin = fat_write_begin, @@ -1520,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, goto out; } + if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { + if (!silent) + fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); + goto out; + } + error = 0; out: diff --git a/fs/file_table.c b/fs/file_table.c index 30d55c9a1744..656647f9575a 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -80,14 +80,14 @@ EXPORT_SYMBOL_GPL(get_max_files); */ #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) int proc_nr_files(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #else int proc_nr_files(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } @@ -198,6 +198,7 @@ static struct file *alloc_file(const struct path *path, int flags, file->f_inode = path->dentry->d_inode; file->f_mapping = path->dentry->d_inode->i_mapping; file->f_wb_err = filemap_sample_wb_err(file->f_mapping); + file->f_sb_err = file_sample_sb_err(file); if ((file->f_mode & FMODE_READ) && likely(fop->read || fop->read_iter)) file->f_mode |= FMODE_CAN_READ; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 76ac9c7d32ec..a605c3dddabc 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1070,7 +1070,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, static unsigned long get_nr_dirty_pages(void) { return global_node_page_state(NR_FILE_DIRTY) + - global_node_page_state(NR_UNSTABLE_NFS) + get_nr_dirty_inodes(); } @@ -1126,6 +1125,7 @@ void inode_io_list_del(struct inode *inode) inode_io_list_del_locked(inode, wb); spin_unlock(&wb->list_lock); } +EXPORT_SYMBOL(inode_io_list_del); /* * mark an inode as under writeback on the sb @@ -2320,7 +2320,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) WARN(bdi_cap_writeback_dirty(wb->bdi) && !test_bit(WB_registered, &wb->state), - "bdi-%s not registered\n", wb->bdi->name); + "bdi-%s not registered\n", bdi_dev_name(wb->bdi)); inode->dirtied_when = jiffies; if (dirtytime) diff --git a/fs/fs_context.c b/fs/fs_context.c index fc9f6ef93b55..7d5c5dd2b1d5 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -42,7 +42,6 @@ static const struct constant_table common_set_sb_flag[] = { { "dirsync", SB_DIRSYNC }, { "lazytime", SB_LAZYTIME }, { "mand", SB_MANDLOCK }, - { "posixacl", SB_POSIXACL }, { "ro", SB_RDONLY }, { "sync", SB_SYNCHRONOUS }, { }, @@ -53,44 +52,15 @@ static const struct constant_table common_clear_sb_flag[] = { { "nolazytime", SB_LAZYTIME }, { "nomand", SB_MANDLOCK }, { "rw", SB_RDONLY }, - { "silent", SB_SILENT }, { }, }; -static const char *const forbidden_sb_flag[] = { - "bind", - "dev", - "exec", - "move", - "noatime", - "nodev", - "nodiratime", - "noexec", - "norelatime", - "nostrictatime", - "nosuid", - "private", - "rec", - "relatime", - "remount", - "shared", - "slave", - "strictatime", - "suid", - "unbindable", -}; - /* * Check for a common mount option that manipulates s_flags. 
*/ static int vfs_parse_sb_flag(struct fs_context *fc, const char *key) { unsigned int token; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(forbidden_sb_flag); i++) - if (strcmp(key, forbidden_sb_flag[i]) == 0) - return -EINVAL; token = lookup_constant(common_set_sb_flag, key, 0); if (token) { diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig index 506c5e643f0d..5e796e6c38e5 100644 --- a/fs/fscache/Kconfig +++ b/fs/fscache/Kconfig @@ -8,7 +8,7 @@ config FSCACHE Different sorts of caches can be plugged in, depending on the resources available. - See Documentation/filesystems/caching/fscache.txt for more information. + See Documentation/filesystems/caching/fscache.rst for more information. config FSCACHE_STATS bool "Gather statistical information on local caching" @@ -25,7 +25,7 @@ config FSCACHE_STATS between CPUs. On the other hand, the stats are very useful for debugging purposes. Saying 'Y' here is recommended. - See Documentation/filesystems/caching/fscache.txt for more information. + See Documentation/filesystems/caching/fscache.rst for more information. config FSCACHE_HISTOGRAM bool "Gather latency information on local caching" @@ -42,7 +42,7 @@ config FSCACHE_HISTOGRAM bouncing between CPUs. On the other hand, the histogram may be useful for debugging purposes. Saying 'N' here is recommended. - See Documentation/filesystems/caching/fscache.txt for more information. + See Documentation/filesystems/caching/fscache.rst for more information. config FSCACHE_DEBUG bool "Debug FS-Cache" @@ -52,7 +52,7 @@ config FSCACHE_DEBUG management module. If this is set, the debugging output may be enabled by setting bits in /sys/modules/fscache/parameter/debug. - See Documentation/filesystems/caching/fscache.txt for more information. + See Documentation/filesystems/caching/fscache.rst for more information. config FSCACHE_OBJECT_LIST bool "Maintain global object list for debugging purposes" diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c index f78793f3d21e..fcc136361415 100644 --- a/fs/fscache/cache.c +++ b/fs/fscache/cache.c @@ -172,7 +172,7 @@ no_preference: * * Initialise a record of a cache and fill in the name. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. */ void fscache_init_cache(struct fscache_cache *cache, @@ -207,7 +207,7 @@ EXPORT_SYMBOL(fscache_init_cache); * * Add a cache to the system, making it available for netfs's to use. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. */ int fscache_add_cache(struct fscache_cache *cache, @@ -307,7 +307,7 @@ EXPORT_SYMBOL(fscache_add_cache); * Note that an I/O error occurred in a cache and that it should no longer be * used for anything. This also reports the error into the kernel log. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. */ void fscache_io_error(struct fscache_cache *cache) @@ -355,7 +355,7 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache, * Withdraw a cache from service, unbinding all its cache objects from the * netfs cookies they're currently representing. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. 
*/ void fscache_withdraw_cache(struct fscache_cache *cache) diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 0ce39658a620..751bc5b1cddf 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -4,7 +4,7 @@ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - * See Documentation/filesystems/caching/netfs-api.txt for more information on + * See Documentation/filesystems/caching/netfs-api.rst for more information on * the netfs API. */ diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 59c2494efda3..c1e6cc9091aa 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -51,8 +51,7 @@ static unsigned fscache_op_max_active = 2; static struct ctl_table_header *fscache_sysctl_header; static int fscache_max_active_sysctl(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { struct workqueue_struct **wqp = table->extra1; unsigned int *datap = table->data; diff --git a/fs/fscache/object.c b/fs/fscache/object.c index cfeba839a0f2..cb2146e02cd5 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -4,7 +4,7 @@ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - * See Documentation/filesystems/caching/object.txt for a description of the + * See Documentation/filesystems/caching/object.rst for a description of the * object state machine and the in-kernel representations. */ @@ -295,7 +295,7 @@ static void fscache_object_work_func(struct work_struct *work) * * Initialise a cache object description to its basic values. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. */ void fscache_object_init(struct fscache_object *object, diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 1a22a55f75a0..4a5651d4904e 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -4,7 +4,7 @@ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - * See Documentation/filesystems/caching/operations.txt + * See Documentation/filesystems/caching/operations.rst */ #define FSCACHE_DEBUG_LEVEL OPERATION diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index eb2a585572dc..774b2618018a 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -12,7 +12,7 @@ config FUSE_FS although chances are your distribution already has that library installed if you've installed the "fuse" package itself. - See <file:Documentation/filesystems/fuse.txt> for more information. + See <file:Documentation/filesystems/fuse.rst> for more information. See <file:Documentation/Changes> for needed library/utility version. 
If you want to develop a userspace FS, or if you want to use diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 97eec7522bf2..8ccc97356cb5 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -805,7 +805,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) if (cs->len != PAGE_SIZE) goto out_fallback; - if (pipe_buf_steal(cs->pipe, buf) != 0) + if (!pipe_buf_try_steal(cs->pipe, buf)) goto out_fallback; newpage = buf->page; @@ -840,7 +840,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) get_page(newpage); if (!(buf->flags & PIPE_BUF_FLAG_LRU)) - lru_cache_add_file(newpage); + lru_cache_add(newpage); err = 0; spin_lock(&cs->req->waitq.lock); @@ -2081,7 +2081,7 @@ static void end_polls(struct fuse_conn *fc) * The same effect is usually achievable through killing the filesystem daemon * and all users of the filesystem. The exception is the combination of an * asynchronous request and the tricky deadlock (see - * Documentation/filesystems/fuse.txt). + * Documentation/filesystems/fuse.rst). * * Aborting requests under I/O goes as follows: 1: Separate out unlocked * requests, they should be finished off immediately. Locked requests will be diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9d67b830fb7a..bac51c32d660 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -915,84 +915,40 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) fuse_readpages_end(fc, &ap->args, err); } -struct fuse_fill_data { - struct fuse_io_args *ia; - struct file *file; - struct inode *inode; - unsigned int nr_pages; - unsigned int max_pages; -}; - -static int fuse_readpages_fill(void *_data, struct page *page) +static void fuse_readahead(struct readahead_control *rac) { - struct fuse_fill_data *data = _data; - struct fuse_io_args *ia = data->ia; - struct fuse_args_pages *ap = &ia->ap; - struct inode *inode = data->inode; + struct inode *inode = rac->mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); + unsigned int i, max_pages, nr_pages = 0; - fuse_wait_on_page_writeback(inode, page->index); - - if (ap->num_pages && - (ap->num_pages == fc->max_pages || - (ap->num_pages + 1) * PAGE_SIZE > fc->max_read || - ap->pages[ap->num_pages - 1]->index + 1 != page->index)) { - data->max_pages = min_t(unsigned int, data->nr_pages, - fc->max_pages); - fuse_send_readpages(ia, data->file); - data->ia = ia = fuse_io_alloc(NULL, data->max_pages); - if (!ia) { - unlock_page(page); - return -ENOMEM; - } - ap = &ia->ap; - } - - if (WARN_ON(ap->num_pages >= data->max_pages)) { - unlock_page(page); - fuse_io_free(ia); - return -EIO; - } - - get_page(page); - ap->pages[ap->num_pages] = page; - ap->descs[ap->num_pages].length = PAGE_SIZE; - ap->num_pages++; - data->nr_pages--; - return 0; -} - -static int fuse_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) -{ - struct inode *inode = mapping->host; - struct fuse_conn *fc = get_fuse_conn(inode); - struct fuse_fill_data data; - int err; - - err = -EIO; if (is_bad_inode(inode)) - goto out; + return; - data.file = file; - data.inode = inode; - data.nr_pages = nr_pages; - data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages); -; - data.ia = fuse_io_alloc(NULL, data.max_pages); - err = -ENOMEM; - if (!data.ia) - goto out; + max_pages = min_t(unsigned int, fc->max_pages, + fc->max_read / PAGE_SIZE); - err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); - if (!err) { - if (data.ia->ap.num_pages) - fuse_send_readpages(data.ia, 
file); - else - fuse_io_free(data.ia); + for (;;) { + struct fuse_io_args *ia; + struct fuse_args_pages *ap; + + nr_pages = readahead_count(rac) - nr_pages; + if (nr_pages > max_pages) + nr_pages = max_pages; + if (nr_pages == 0) + break; + ia = fuse_io_alloc(NULL, nr_pages); + if (!ia) + return; + ap = &ia->ap; + nr_pages = __readahead_batch(rac, ap->pages, nr_pages); + for (i = 0; i < nr_pages; i++) { + fuse_wait_on_page_writeback(inode, + readahead_index(rac) + i); + ap->descs[i].length = PAGE_SIZE; + } + ap->num_pages = nr_pages; + fuse_send_readpages(ia, rac->file); } -out: - return err; } static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to) @@ -3373,10 +3329,10 @@ static const struct file_operations fuse_file_operations = { static const struct address_space_operations fuse_file_aops = { .readpage = fuse_readpage, + .readahead = fuse_readahead, .writepage = fuse_writepage, .writepages = fuse_writepages, .launder_page = fuse_launder_page, - .readpages = fuse_readpages, .set_page_dirty = __set_page_dirty_nobuffers, .bmap = fuse_bmap, .direct_IO = fuse_direct_IO, diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 786c1ce8f030..72c9560f4467 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -577,7 +577,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, } /** - * gfs2_readpages - Read a bunch of pages at once + * gfs2_readahead - Read a bunch of pages at once * @file: The file to read from * @mapping: Address space info * @pages: List of pages to read @@ -590,31 +590,24 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, * obviously not something we'd want to do on too regular a basis. * Any I/O we ignore at this time will be done via readpage later. * 2. We don't handle stuffed files here we let readpage do the honours. - * 3. mpage_readpages() does most of the heavy lifting in the common case. + * 3. mpage_readahead() does most of the heavy lifting in the common case. * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places. 
*/ -static int gfs2_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void gfs2_readahead(struct readahead_control *rac) { - struct inode *inode = mapping->host; + struct inode *inode = rac->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_holder gh; - int ret; gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); - ret = gfs2_glock_nq(&gh); - if (unlikely(ret)) + if (gfs2_glock_nq(&gh)) goto out_uninit; if (!gfs2_is_stuffed(ip)) - ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); + mpage_readahead(rac, gfs2_block_map); gfs2_glock_dq(&gh); out_uninit: gfs2_holder_uninit(&gh); - if (unlikely(gfs2_withdrawn(sdp))) - ret = -EIO; - return ret; } /** @@ -833,7 +826,7 @@ static const struct address_space_operations gfs2_aops = { .writepage = gfs2_writepage, .writepages = gfs2_writepages, .readpage = gfs2_readpage, - .readpages = gfs2_readpages, + .readahead = gfs2_readahead, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, @@ -847,7 +840,7 @@ static const struct address_space_operations gfs2_jdata_aops = { .writepage = gfs2_jdata_writepage, .writepages = gfs2_jdata_writepages, .readpage = gfs2_readpage, - .readpages = gfs2_readpages, + .readahead = gfs2_readahead, .set_page_dirty = jdata_set_page_dirty, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index c3f7732415be..c0f2875c946c 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -354,7 +354,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip) hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN); if (hc == NULL) - hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL); + hc = __vmalloc(hsize, GFP_NOFS); if (hc == NULL) return ERR_PTR(-ENOMEM); @@ -1166,7 +1166,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN); if (hc2 == NULL) - hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL); + hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS); if (!hc2) return -ENOMEM; @@ -1327,7 +1327,7 @@ static void *gfs2_alloc_sort_buffer(unsigned size) if (size < KMALLOC_MAX_SIZE) ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN); if (!ptr) - ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL); + ptr = __vmalloc(size, GFP_NOFS); return ptr; } @@ -1987,8 +1987,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN); if (ht == NULL) - ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO, - PAGE_KERNEL); + ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO); if (!ht) return -ENOMEM; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 5acd3ce30759..a1337bf31e49 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -17,6 +17,7 @@ #include <linux/crc32.h> #include <linux/iomap.h> #include <linux/security.h> +#include <linux/fiemap.h> #include <linux/uaccess.h> #include "gfs2.h" diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 8259fef3f986..4b67d47a7e00 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1365,7 +1365,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN); if (sdp->sd_quota_bitmap == NULL) sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS | - __GFP_ZERO, PAGE_KERNEL); + __GFP_ZERO); if (!sdp->sd_quota_bitmap) return error; diff --git a/fs/hfs/Kconfig b/fs/hfs/Kconfig index 44f6e89bcb75..129926b5142d 100644 --- a/fs/hfs/Kconfig +++ b/fs/hfs/Kconfig 
@@ -6,7 +6,7 @@ config HFS_FS help If you say Y here, you will be able to mount Macintosh-formatted floppy disks and hard drive partitions with full read-write access. - Please read <file:Documentation/filesystems/hfs.txt> to learn about + Please read <file:Documentation/filesystems/hfs.rst> to learn about the available mount options. To compile this file system support as a module, choose M here: the diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c index 460281b1299e..cdf0edeeb278 100644 --- a/fs/hfs/mdb.c +++ b/fs/hfs/mdb.c @@ -32,29 +32,35 @@ static int hfs_get_last_session(struct super_block *sb, sector_t *start, sector_t *size) { - struct cdrom_multisession ms_info; - struct cdrom_tocentry te; - int res; + struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); /* default values */ *start = 0; *size = i_size_read(sb->s_bdev->bd_inode) >> 9; if (HFS_SB(sb)->session >= 0) { + struct cdrom_tocentry te; + + if (!cdi) + return -EINVAL; + te.cdte_track = HFS_SB(sb)->session; te.cdte_format = CDROM_LBA; - res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te); - if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) { - *start = (sector_t)te.cdte_addr.lba << 2; - return 0; + if (cdrom_read_tocentry(cdi, &te) || + (te.cdte_ctrl & CDROM_DATA_TRACK) != 4) { + pr_err("invalid session number or type of track\n"); + return -EINVAL; } - pr_err("invalid session number or type of track\n"); - return -EINVAL; + + *start = (sector_t)te.cdte_addr.lba << 2; + } else if (cdi) { + struct cdrom_multisession ms_info; + + ms_info.addr_format = CDROM_LBA; + if (cdrom_multisession(cdi, &ms_info) == 0 && ms_info.xa_flag) + *start = (sector_t)ms_info.addr.lba << 2; } - ms_info.addr_format = CDROM_LBA; - res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION, (unsigned long)&ms_info); - if (!res && ms_info.xa_flag) - *start = (sector_t)ms_info.addr.lba << 2; + return 0; } diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 94bd83b36644..e3da9e96b835 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -340,7 +340,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, } if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags)) - blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); inode_unlock(inode); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 2b9e5743105e..129dca3f4b78 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -239,7 +239,7 @@ out: mutex_unlock(&sbi->vh_mutex); if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags)) - blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); + blkdev_issue_flush(sb->s_bdev, GFP_KERNEL); return error; } diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 08c1580bdf7a..61eec628805d 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -127,31 +127,34 @@ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd) static int hfsplus_get_last_session(struct super_block *sb, sector_t *start, sector_t *size) { - struct cdrom_multisession ms_info; - struct cdrom_tocentry te; - int res; + struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); /* default values */ *start = 0; *size = i_size_read(sb->s_bdev->bd_inode) >> 9; if (HFSPLUS_SB(sb)->session >= 0) { + struct cdrom_tocentry te; + + if (!cdi) + return -EINVAL; + te.cdte_track = HFSPLUS_SB(sb)->session; te.cdte_format = CDROM_LBA; - res = ioctl_by_bdev(sb->s_bdev, - CDROMREADTOCENTRY, (unsigned long)&te); - if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) { - *start = 
(sector_t)te.cdte_addr.lba << 2; - return 0; + if (cdrom_read_tocentry(cdi, &te) || + (te.cdte_ctrl & CDROM_DATA_TRACK) != 4) { + pr_err("invalid session number or type of track\n"); + return -EINVAL; } - pr_err("invalid session number or type of track\n"); - return -EINVAL; + *start = (sector_t)te.cdte_addr.lba << 2; + } else if (cdi) { + struct cdrom_multisession ms_info; + + ms_info.addr_format = CDROM_LBA; + if (cdrom_multisession(cdi, &ms_info) == 0 && ms_info.xa_flag) + *start = (sector_t)ms_info.addr.lba << 2; } - ms_info.addr_format = CDROM_LBA; - res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION, - (unsigned long)&ms_info); - if (!res && ms_info.xa_flag) - *start = (sector_t)ms_info.addr.lba << 2; + return 0; } diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig index 56aa0336254a..2b36dc6f0a10 100644 --- a/fs/hpfs/Kconfig +++ b/fs/hpfs/Kconfig @@ -9,7 +9,7 @@ config HPFS_FS write files to an OS/2 HPFS partition on your hard drive. OS/2 floppies however are in regular MSDOS format, so you don't need this option in order to be able to read them. Read - <file:Documentation/filesystems/hpfs.txt>. + <file:Documentation/filesystems/hpfs.rst>. To compile this file system support as a module, choose M here: the module will be called hpfs. If unsure, say N. diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index b36abf9cb345..077c25128eb7 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -9,6 +9,7 @@ #include "hpfs_fn.h" #include <linux/mpage.h> +#include <linux/fiemap.h> #define BLOCKS(size) (((size) + 511) >> 9) @@ -125,10 +126,9 @@ static int hpfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, hpfs_get_block, wbc); } -static int hpfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void hpfs_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); + mpage_readahead(rac, hpfs_get_block); } static int hpfs_writepages(struct address_space *mapping, @@ -198,7 +198,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, const struct address_space_operations hpfs_aops = { .readpage = hpfs_readpage, .writepage = hpfs_writepage, - .readpages = hpfs_readpages, + .readahead = hpfs_readahead, .writepages = hpfs_writepages, .write_begin = hpfs_write_begin, .write_end = hpfs_write_end, diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 991c60c7ffe0..f3420a643b4f 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -38,6 +38,7 @@ #include <linux/uio.h> #include <linux/uaccess.h> +#include <linux/sched/mm.h> static const struct super_operations hugetlbfs_ops; static const struct address_space_operations hugetlbfs_aops; @@ -191,13 +192,60 @@ out: #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA static unsigned long +hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long +hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + 
info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + if (unlikely(offset_in_page(addr))) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = current->mm->mmap_base; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +static unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct hstate *h = hstate_file(file); - struct vm_unmapped_area_info info; if (len & ~huge_page_mask(h)) return -EINVAL; @@ -218,13 +266,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return addr; } - info.flags = 0; - info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; - info.high_limit = TASK_SIZE; - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - info.align_offset = 0; - return vm_unmapped_area(&info); + /* + * Use mm->get_unmapped_area value as a hint to use topdown routine. + * If architectures have special needs, they should define their own + * version of hugetlb_get_unmapped_area. + */ + if (mm->get_unmapped_area == arch_get_unmapped_area_topdown) + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); } #endif diff --git a/fs/inode.c b/fs/inode.c index 93d9252a00ab..72c4c347afb7 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -108,7 +108,7 @@ long get_nr_dirty_inodes(void) */ #ifdef CONFIG_SYSCTL int proc_nr_inodes(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { inodes_stat.nr_inodes = get_nr_inodes(); inodes_stat.nr_unused = get_nr_inodes_unused(); @@ -497,7 +497,7 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval) spin_lock(&inode_hash_lock); spin_lock(&inode->i_lock); - hlist_add_head(&inode->i_hash, b); + hlist_add_head_rcu(&inode->i_hash, b); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); } @@ -513,7 +513,7 @@ void __remove_inode_hash(struct inode *inode) { spin_lock(&inode_hash_lock); spin_lock(&inode->i_lock); - hlist_del_init(&inode->i_hash); + hlist_del_init_rcu(&inode->i_hash); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); } @@ -1107,7 +1107,7 @@ again: */ spin_lock(&inode->i_lock); inode->i_state |= I_NEW; - hlist_add_head(&inode->i_hash, head); + hlist_add_head_rcu(&inode->i_hash, head); spin_unlock(&inode->i_lock); if (!creating) inode_sb_list_add(inode); @@ -1201,7 +1201,7 @@ again: inode->i_ino = ino; spin_lock(&inode->i_lock); inode->i_state = I_NEW; - hlist_add_head(&inode->i_hash, head); + hlist_add_head_rcu(&inode->i_hash, head); spin_unlock(&inode->i_lock); inode_sb_list_add(inode); spin_unlock(&inode_hash_lock); @@ -1244,15 +1244,10 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino) struct hlist_head *b = inode_hashtable + hash(sb, ino); struct inode *inode; - spin_lock(&inode_hash_lock); - hlist_for_each_entry(inode, b, i_hash) { - if (inode->i_ino == ino && inode->i_sb == sb) { - 
spin_unlock(&inode_hash_lock); + hlist_for_each_entry_rcu(inode, b, i_hash) { + if (inode->i_ino == ino && inode->i_sb == sb) return 0; - } } - spin_unlock(&inode_hash_lock); - return 1; } @@ -1281,6 +1276,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved) static unsigned int counter; ino_t res; + rcu_read_lock(); spin_lock(&iunique_lock); do { if (counter <= max_reserved) @@ -1288,6 +1284,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved) res = counter++; } while (!test_inode_iunique(sb, res)); spin_unlock(&iunique_lock); + rcu_read_unlock(); return res; } @@ -1456,6 +1453,84 @@ out: } EXPORT_SYMBOL(find_inode_nowait); +/** + * find_inode_rcu - find an inode in the inode cache + * @sb: Super block of file system to search + * @hashval: Key to hash + * @test: Function to test match on an inode + * @data: Data for test function + * + * Search for the inode specified by @hashval and @data in the inode cache, + * where the helper function @test will return 0 if the inode does not match + * and 1 if it does. The @test function must be responsible for taking the + * i_lock spin_lock and checking i_state for an inode being freed or being + * initialized. + * + * If successful, this will return the inode for which the @test function + * returned 1 and NULL otherwise. + * + * The @test function is not permitted to take a ref on any inode presented. + * It is also not permitted to sleep. + * + * The caller must hold the RCU read lock. + */ +struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval, + int (*test)(struct inode *, void *), void *data) +{ + struct hlist_head *head = inode_hashtable + hash(sb, hashval); + struct inode *inode; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "suspicious find_inode_rcu() usage"); + + hlist_for_each_entry_rcu(inode, head, i_hash) { + if (inode->i_sb == sb && + !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) && + test(inode, data)) + return inode; + } + return NULL; +} +EXPORT_SYMBOL(find_inode_rcu); + +/** + * find_inode_by_rcu - Find an inode in the inode cache + * @sb: Super block of file system to search + * @ino: The inode number to match + * + * Search for the inode specified by @hashval and @data in the inode cache, + * where the helper function @test will return 0 if the inode does not match + * and 1 if it does. The @test function must be responsible for taking the + * i_lock spin_lock and checking i_state for an inode being freed or being + * initialized. + * + * If successful, this will return the inode for which the @test function + * returned 1 and NULL otherwise. + * + * The @test function is not permitted to take a ref on any inode presented. + * It is also not permitted to sleep. + * + * The caller must hold the RCU read lock. 
+ */ +struct inode *find_inode_by_ino_rcu(struct super_block *sb, + unsigned long ino) +{ + struct hlist_head *head = inode_hashtable + hash(sb, ino); + struct inode *inode; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "suspicious find_inode_by_ino_rcu() usage"); + + hlist_for_each_entry_rcu(inode, head, i_hash) { + if (inode->i_ino == ino && + inode->i_sb == sb && + !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE))) + return inode; + } + return NULL; +} +EXPORT_SYMBOL(find_inode_by_ino_rcu); + int insert_inode_locked(struct inode *inode) { struct super_block *sb = inode->i_sb; @@ -1480,7 +1555,7 @@ int insert_inode_locked(struct inode *inode) if (likely(!old)) { spin_lock(&inode->i_lock); inode->i_state |= I_NEW | I_CREATING; - hlist_add_head(&inode->i_hash, head); + hlist_add_head_rcu(&inode->i_hash, head); spin_unlock(&inode->i_lock); spin_unlock(&inode_hash_lock); return 0; @@ -1540,6 +1615,7 @@ static void iput_final(struct inode *inode) { struct super_block *sb = inode->i_sb; const struct super_operations *op = inode->i_sb->s_op; + unsigned long state; int drop; WARN_ON(inode->i_state & I_NEW); @@ -1555,16 +1631,20 @@ static void iput_final(struct inode *inode) return; } + state = inode->i_state; if (!drop) { - inode->i_state |= I_WILL_FREE; + WRITE_ONCE(inode->i_state, state | I_WILL_FREE); spin_unlock(&inode->i_lock); + write_inode_now(inode, 1); + spin_lock(&inode->i_lock); - WARN_ON(inode->i_state & I_NEW); - inode->i_state &= ~I_WILL_FREE; + state = inode->i_state; + WARN_ON(state & I_NEW); + state &= ~I_WILL_FREE; } - inode->i_state |= I_FREEING; + WRITE_ONCE(inode->i_state, state | I_FREEING); if (!list_empty(&inode->i_lru)) inode_lru_list_del(inode); spin_unlock(&inode->i_lock); @@ -1606,14 +1686,14 @@ EXPORT_SYMBOL(iput); * @inode: inode owning the block number being requested * @block: pointer containing the block to find * - * Replaces the value in *block with the block number on the device holding + * Replaces the value in ``*block`` with the block number on the device holding * corresponding to the requested block number in the file. * That is, asked for block 4 of inode 1 the function will replace the - * 4 in *block, with disk block relative to the disk start that holds that + * 4 in ``*block``, with disk block relative to the disk start that holds that * block of the file. * * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a - * hole, returns 0 and *block is also set to 0. + * hole, returns 0 and ``*block`` is also set to 0. 
*/ int bmap(struct inode *inode, sector_t *block) { diff --git a/fs/internal.h b/fs/internal.h index aa5d45524e87..9b863a7bd708 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -126,7 +126,6 @@ extern struct open_how build_open_how(int flags, umode_t mode); extern int build_open_flags(const struct open_how *how, struct open_flags *op); long do_sys_ftruncate(unsigned int fd, loff_t length, int small); -long do_faccessat(int dfd, const char __user *filename, int mode); int do_fchmodat(int dfd, const char __user *filename, umode_t mode); int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); @@ -143,8 +142,6 @@ extern int dentry_needs_remove_privs(struct dentry *dentry); /* * fs-writeback.c */ -extern void inode_io_list_del(struct inode *inode); - extern long get_nr_dirty_inodes(void); extern int invalidate_inodes(struct super_block *, bool); @@ -186,5 +183,5 @@ int sb_init_dio_done_wq(struct super_block *sb); /* * fs/stat.c: */ -unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags); -int cp_statx(const struct kstat *stat, struct statx __user *buffer); +int do_statx(int dfd, const char __user *filename, unsigned flags, + unsigned int mask, struct statx __user *buffer); diff --git a/fs/io_uring.c b/fs/io_uring.c index bb25e3997d41..9d4bd0d3a080 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -142,7 +142,7 @@ struct io_rings { */ u32 sq_dropped; /* - * Runtime flags + * Runtime SQ flags * * Written by the kernel, shouldn't be modified by the * application. @@ -152,6 +152,13 @@ struct io_rings { */ u32 sq_flags; /* + * Runtime CQ flags + * + * Written by the application, shouldn't be modified by the + * kernel. + */ + u32 cq_flags; + /* * Number of completion events lost because the queue was full; * this should be avoided by the application by making sure * there are not more requests pending than there is space in @@ -191,7 +198,7 @@ struct fixed_file_ref_node { struct list_head node; struct list_head file_list; struct fixed_file_data *file_data; - struct work_struct work; + struct llist_node llist; }; struct fixed_file_data { @@ -279,8 +286,8 @@ struct io_ring_ctx { const struct cred *creds; - /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */ - struct completion *completions; + struct completion ref_comp; + struct completion sq_thread_comp; /* if all else fails... 
*/ struct io_kiocb *fallback_req; @@ -327,6 +334,9 @@ struct io_ring_ctx { struct list_head inflight_list; } ____cacheline_aligned_in_smp; + struct delayed_work file_put_work; + struct llist_head file_put_llist; + struct work_struct exit_work; }; @@ -384,7 +394,8 @@ struct io_timeout { struct file *file; u64 addr; int flags; - u32 count; + u32 off; + u32 target_seq; }; struct io_rw { @@ -415,11 +426,7 @@ struct io_sr_msg { struct io_open { struct file *file; int dfd; - union { - unsigned mask; - }; struct filename *filename; - struct statx __user *buffer; struct open_how how; unsigned long nofile; }; @@ -471,6 +478,15 @@ struct io_provide_buf { __u16 bid; }; +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + const char __user *filename; + struct statx __user *buffer; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -612,11 +628,11 @@ struct io_kiocb { struct io_epoll epoll; struct io_splice splice; struct io_provide_buf pbuf; + struct io_statx statx; }; struct io_async_ctx *io; int cflags; - bool needs_fixed_file; u8 opcode; u16 buf_index; @@ -788,7 +804,6 @@ static const struct io_op_def io_op_defs[] = { .needs_fs = 1, }, [IORING_OP_CLOSE] = { - .needs_file = 1, .file_table = 1, }, [IORING_OP_FILES_UPDATE] = { @@ -847,6 +862,11 @@ static const struct io_op_def io_op_defs[] = { }, [IORING_OP_PROVIDE_BUFFERS] = {}, [IORING_OP_REMOVE_BUFFERS] = {}, + [IORING_OP_TEE] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + }, }; static void io_wq_submit_work(struct io_wq_work **workptr); @@ -882,11 +902,18 @@ struct sock *io_uring_get_socket(struct file *file) } EXPORT_SYMBOL(io_uring_get_socket); +static void io_file_put_work(struct work_struct *work); + +static inline bool io_async_submit(struct io_ring_ctx *ctx) +{ + return ctx->flags & IORING_SETUP_SQPOLL; +} + static void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); - complete(&ctx->completions[0]); + complete(&ctx->ref_comp); } static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) @@ -902,10 +929,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) if (!ctx->fallback_req) goto err; - ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL); - if (!ctx->completions) - goto err; - /* * Use 5 bits less than the max cq entries, that should give us around * 32 entries per hash list if totally full and uniformly spread. 
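In the io_uring hunks above, fixed_file_ref_node trades its per-node work_struct for a llist_node, and the context gains file_put_work plus file_put_llist, so dead file registrations are queued on a lock-free list and reaped in one batch by a delayed work item. A minimal, self-contained sketch of that producer/consumer pattern, with hypothetical names (the real io_file_put_work() body is not shown in this section):

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical node type standing in for fixed_file_ref_node. */
struct put_node {
	struct llist_node llist;
	/* payload: references to drop */
};

static void put_work_fn(struct work_struct *work);
static LLIST_HEAD(put_llist);
static DECLARE_DELAYED_WORK(put_work, put_work_fn);

static void queue_put(struct put_node *node)
{
	/* llist_add() returns true only when the list was empty, so the
	 * work item is scheduled once per batch. */
	if (llist_add(&node->llist, &put_llist))
		schedule_delayed_work(&put_work, 1);
}

static void put_work_fn(struct work_struct *work)
{
	struct llist_node *list = llist_del_all(&put_llist);
	struct put_node *node, *tmp;

	llist_for_each_entry_safe(node, tmp, list, llist) {
		/* drop whatever this node holds, then free it */
		kfree(node);
	}
}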
@@ -929,8 +952,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_waitqueue_head(&ctx->sqo_wait); init_waitqueue_head(&ctx->cq_wait); INIT_LIST_HEAD(&ctx->cq_overflow_list); - init_completion(&ctx->completions[0]); - init_completion(&ctx->completions[1]); + init_completion(&ctx->ref_comp); + init_completion(&ctx->sq_thread_comp); idr_init(&ctx->io_buffer_idr); idr_init(&ctx->personality_idr); mutex_init(&ctx->uring_lock); @@ -942,11 +965,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_waitqueue_head(&ctx->inflight_wait); spin_lock_init(&ctx->inflight_lock); INIT_LIST_HEAD(&ctx->inflight_list); + INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work); + init_llist_head(&ctx->file_put_llist); return ctx; err: if (ctx->fallback_req) kmem_cache_free(req_cachep, ctx->fallback_req); - kfree(ctx->completions); kfree(ctx->cancel_hash); kfree(ctx); return NULL; @@ -968,36 +992,6 @@ static inline bool req_need_defer(struct io_kiocb *req) return false; } -static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) -{ - struct io_kiocb *req; - - req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); - if (req && !req_need_defer(req)) { - list_del_init(&req->list); - return req; - } - - return NULL; -} - -static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) -{ - struct io_kiocb *req; - - req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list); - if (req) { - if (req->flags & REQ_F_TIMEOUT_NOSEQ) - return NULL; - if (!__req_need_defer(req)) { - list_del_init(&req->list); - return req; - } - } - - return NULL; -} - static void __io_commit_cqring(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; @@ -1113,17 +1107,43 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx) spin_unlock_irq(&ctx->completion_lock); } -static void io_commit_cqring(struct io_ring_ctx *ctx) +static void __io_queue_deferred(struct io_ring_ctx *ctx) { - struct io_kiocb *req; + do { + struct io_kiocb *req = list_first_entry(&ctx->defer_list, + struct io_kiocb, list); - while ((req = io_get_timeout_req(ctx)) != NULL) + if (req_need_defer(req)) + break; + list_del_init(&req->list); + io_queue_async_work(req); + } while (!list_empty(&ctx->defer_list)); +} + +static void io_flush_timeouts(struct io_ring_ctx *ctx) +{ + while (!list_empty(&ctx->timeout_list)) { + struct io_kiocb *req = list_first_entry(&ctx->timeout_list, + struct io_kiocb, list); + + if (req->flags & REQ_F_TIMEOUT_NOSEQ) + break; + if (req->timeout.target_seq != ctx->cached_cq_tail + - atomic_read(&ctx->cq_timeouts)) + break; + + list_del_init(&req->list); io_kill_timeout(req); + } +} +static void io_commit_cqring(struct io_ring_ctx *ctx) +{ + io_flush_timeouts(ctx); __io_commit_cqring(ctx); - while ((req = io_get_deferred_req(ctx)) != NULL) - io_queue_async_work(req); + if (unlikely(!list_empty(&ctx->defer_list))) + __io_queue_deferred(ctx); } static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) @@ -1148,6 +1168,8 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) { if (!ctx->cq_ev_fd) return false; + if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) + return false; if (!ctx->eventfd_async) return true; return io_wq_current_is_worker(); @@ -1984,15 +2006,19 @@ static void io_iopoll_req_issued(struct io_kiocb *req) wake_up(&ctx->sqo_wait); } -static void io_file_put(struct io_submit_state *state) +static void __io_state_file_put(struct io_submit_state *state) { - if (state->file) { 
- int diff = state->has_refs - state->used_refs; + int diff = state->has_refs - state->used_refs; - if (diff) - fput_many(state->file, diff); - state->file = NULL; - } + if (diff) + fput_many(state->file, diff); + state->file = NULL; +} + +static inline void io_state_file_put(struct io_submit_state *state) +{ + if (state->file) + __io_state_file_put(state); } /* @@ -2011,7 +2037,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd) state->ios_left--; return state->file; } - io_file_put(state); + __io_state_file_put(state); } state->file = fget_many(fd, state->ios_left); if (!state->file) @@ -2727,7 +2753,8 @@ out_free: return ret; } -static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int __io_splice_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { struct io_splice* sp = &req->splice; unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL; @@ -2737,8 +2764,6 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; sp->file_in = NULL; - sp->off_in = READ_ONCE(sqe->splice_off_in); - sp->off_out = READ_ONCE(sqe->off); sp->len = READ_ONCE(sqe->len); sp->flags = READ_ONCE(sqe->splice_flags); @@ -2757,6 +2782,46 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } +static int io_tee_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off)) + return -EINVAL; + return __io_splice_prep(req, sqe); +} + +static int io_tee(struct io_kiocb *req, bool force_nonblock) +{ + struct io_splice *sp = &req->splice; + struct file *in = sp->file_in; + struct file *out = sp->file_out; + unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; + long ret = 0; + + if (force_nonblock) + return -EAGAIN; + if (sp->len) + ret = do_tee(in, out, sp->len, flags); + + io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); + req->flags &= ~REQ_F_NEED_CLEANUP; + + io_cqring_add_event(req, ret); + if (ret != sp->len) + req_set_fail_links(req); + io_put_req(req); + return 0; +} + +static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_splice* sp = &req->splice; + + sp->off_in = READ_ONCE(sqe->splice_off_in); + sp->off_out = READ_ONCE(sqe->off); + return __io_splice_prep(req, sqe); +} + static int io_splice(struct io_kiocb *req, bool force_nonblock) { struct io_splice *sp = &req->splice; @@ -3305,43 +3370,23 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - const char __user *fname; - unsigned lookup_flags; - int ret; - if (sqe->ioprio || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; - if (req->flags & REQ_F_NEED_CLEANUP) - return 0; - - req->open.dfd = READ_ONCE(sqe->fd); - req->open.mask = READ_ONCE(sqe->len); - fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); - req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); - req->open.how.flags = READ_ONCE(sqe->statx_flags); - - if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags)) - return -EINVAL; - req->open.filename = getname_flags(fname, lookup_flags, NULL); - if (IS_ERR(req->open.filename)) { - ret = PTR_ERR(req->open.filename); - req->open.filename = NULL; - return ret; - } + req->statx.dfd = READ_ONCE(sqe->fd); + req->statx.mask = READ_ONCE(sqe->len); + req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); + req->statx.buffer = 
u64_to_user_ptr(READ_ONCE(sqe->addr2)); + req->statx.flags = READ_ONCE(sqe->statx_flags); - req->flags |= REQ_F_NEED_CLEANUP; return 0; } static int io_statx(struct io_kiocb *req, bool force_nonblock) { - struct io_open *ctx = &req->open; - unsigned lookup_flags; - struct path path; - struct kstat stat; + struct io_statx *ctx = &req->statx; int ret; if (force_nonblock) { @@ -3351,29 +3396,9 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock) return -EAGAIN; } - if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags)) - return -EINVAL; - -retry: - /* filename_lookup() drops it, keep a reference */ - ctx->filename->refcnt++; + ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, + ctx->buffer); - ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path, - NULL); - if (ret) - goto err; - - ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags); - path_put(&path); - if (retry_estale(ret, lookup_flags)) { - lookup_flags |= LOOKUP_REVAL; - goto retry; - } - if (!ret) - ret = cp_statx(&stat, ctx->buffer); -err: - putname(ctx->filename); - req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < 0) req_set_fail_links(req); io_cqring_add_event(req, ret); @@ -3396,10 +3421,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); - if (req->file->f_op == &io_uring_fops || - req->close.fd == req->ctx->ring_fd) - return -EBADF; - return 0; } @@ -3432,21 +3453,14 @@ static int io_close(struct io_kiocb *req, bool force_nonblock) req->close.put_file = NULL; ret = __close_fd_get_file(req->close.fd, &req->close.put_file); if (ret < 0) - return ret; + return (ret == -ENOENT) ? -EBADF : ret; /* if the file has a flush method, be safe and punt to async */ if (req->close.put_file->f_op->flush && force_nonblock) { - /* submission ref will be dropped, take it for async */ - refcount_inc(&req->refs); - + /* avoid grabbing files - we don't need the files */ + req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT; req->work.func = io_close_finish; - /* - * Do manual async queue here to avoid grabbing files - we don't - * need the files, and it'll cause io_close_finish() to close - * the file again and cause a double CQE entry for this request - */ - io_queue_async_work(req); - return 0; + return -EAGAIN; } /* @@ -4096,27 +4110,6 @@ struct io_poll_table { int error; }; -static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, - struct wait_queue_head *head) -{ - if (unlikely(poll->head)) { - pt->error = -EINVAL; - return; - } - - pt->error = 0; - poll->head = head; - add_wait_queue(head, &poll->wait); -} - -static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, - struct poll_table_struct *p) -{ - struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); - - __io_queue_proc(&pt->req->apoll->poll, pt, head); -} - static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { @@ -4170,12 +4163,150 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) return false; } +static void io_poll_remove_double(struct io_kiocb *req) +{ + struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + + lockdep_assert_held(&req->ctx->completion_lock); + + if (poll && poll->head) { + struct wait_queue_head *head = poll->head; + + spin_lock(&head->lock); + list_del_init(&poll->wait.entry); + if (poll->wait.private) + refcount_dec(&req->refs); + poll->head = NULL; + spin_unlock(&head->lock); + 
} +} + +static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) +{ + struct io_ring_ctx *ctx = req->ctx; + + io_poll_remove_double(req); + req->poll.done = true; + io_cqring_fill_event(req, error ? error : mangle_poll(mask)); + io_commit_cqring(ctx); +} + +static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (io_poll_rewait(req, &req->poll)) { + spin_unlock_irq(&ctx->completion_lock); + return; + } + + hash_del(&req->hash_node); + io_poll_complete(req, req->result, 0); + req->flags |= REQ_F_COMP_LOCKED; + io_put_req_find_next(req, nxt); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); +} + +static void io_poll_task_func(struct callback_head *cb) +{ + struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct io_kiocb *nxt = NULL; + + io_poll_task_handler(req, &nxt); + if (nxt) { + struct io_ring_ctx *ctx = nxt->ctx; + + mutex_lock(&ctx->uring_lock); + __io_queue_sqe(nxt, NULL); + mutex_unlock(&ctx->uring_lock); + } +} + +static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct io_kiocb *req = wait->private; + struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + __poll_t mask = key_to_poll(key); + + /* for instances that support it check for an event match first: */ + if (mask && !(mask & poll->events)) + return 0; + + if (req->poll.head) { + bool done; + + spin_lock(&req->poll.head->lock); + done = list_empty(&req->poll.wait.entry); + if (!done) + list_del_init(&req->poll.wait.entry); + spin_unlock(&req->poll.head->lock); + if (!done) + __io_async_wake(req, poll, mask, io_poll_task_func); + } + refcount_dec(&req->refs); + return 1; +} + +static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, + wait_queue_func_t wake_func) +{ + poll->head = NULL; + poll->done = false; + poll->canceled = false; + poll->events = events; + INIT_LIST_HEAD(&poll->wait.entry); + init_waitqueue_func_entry(&poll->wait, wake_func); +} + +static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, + struct wait_queue_head *head) +{ + struct io_kiocb *req = pt->req; + + /* + * If poll->head is already set, it's because the file being polled + * uses multiple waitqueues for poll handling (eg one for read, one + * for write). Setup a separate io_poll_iocb if this happens. 
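/*
 * Editor's illustration, not part of this patch: the comment above covers a
 * ->poll implementation that registers on more than one waitqueue, which is
 * what forces __io_queue_proc() to allocate a second io_poll_iocb. A minimal
 * hypothetical driver poll method of that shape is sketched below; "mydev",
 * "mydev_poll" and the two waitqueues are illustrative names only.
 */
struct mydev {
	wait_queue_head_t read_wait;
	wait_queue_head_t write_wait;
	bool readable;
	bool writable;
};

static __poll_t mydev_poll(struct file *file, poll_table *wait)
{
	struct mydev *dev = file->private_data;
	__poll_t mask = 0;

	/* Each poll_wait() call reaches the queueing proc once, so an
	 * io_uring poll request against this file sees two waitqueues. */
	poll_wait(file, &dev->read_wait, wait);
	poll_wait(file, &dev->write_wait, wait);

	if (dev->readable)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (dev->writable)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}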
+ */ + if (unlikely(poll->head)) { + /* already have a 2nd entry, fail a third attempt */ + if (req->io) { + pt->error = -EINVAL; + return; + } + poll = kmalloc(sizeof(*poll), GFP_ATOMIC); + if (!poll) { + pt->error = -ENOMEM; + return; + } + io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake); + refcount_inc(&req->refs); + poll->wait.private = req; + req->io = (void *) poll; + } + + pt->error = 0; + poll->head = head; + add_wait_queue(head, &poll->wait); +} + +static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, + struct poll_table_struct *p) +{ + struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + + __io_queue_proc(&pt->req->apoll->poll, pt, head); +} + static void io_async_task_func(struct callback_head *cb) { struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); struct async_poll *apoll = req->apoll; struct io_ring_ctx *ctx = req->ctx; - bool canceled; + bool canceled = false; trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); @@ -4184,34 +4315,33 @@ static void io_async_task_func(struct callback_head *cb) return; } - if (hash_hashed(&req->hash_node)) + /* If req is still hashed, it cannot have been canceled. Don't check. */ + if (hash_hashed(&req->hash_node)) { hash_del(&req->hash_node); - - canceled = READ_ONCE(apoll->poll.canceled); - if (canceled) { - io_cqring_fill_event(req, -ECANCELED); - io_commit_cqring(ctx); + } else { + canceled = READ_ONCE(apoll->poll.canceled); + if (canceled) { + io_cqring_fill_event(req, -ECANCELED); + io_commit_cqring(ctx); + } } spin_unlock_irq(&ctx->completion_lock); /* restore ->work in case we need to retry again */ memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll); - if (canceled) { - kfree(apoll); + if (!canceled) { + __set_current_state(TASK_RUNNING); + mutex_lock(&ctx->uring_lock); + __io_queue_sqe(req, NULL); + mutex_unlock(&ctx->uring_lock); + } else { io_cqring_ev_posted(ctx); req_set_fail_links(req); io_double_put_req(req); - return; } - - __set_current_state(TASK_RUNNING); - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(req, NULL); - mutex_unlock(&ctx->uring_lock); - - kfree(apoll); } static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, @@ -4245,18 +4375,13 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req, bool cancel = false; poll->file = req->file; - poll->head = NULL; - poll->done = poll->canceled = false; - poll->events = mask; + io_init_poll_iocb(poll, mask, wake_func); + poll->wait.private = req; ipt->pt._key = mask; ipt->req = req; ipt->error = -EINVAL; - INIT_LIST_HEAD(&poll->wait.entry); - init_waitqueue_func_entry(&poll->wait, wake_func); - poll->wait.private = req; - mask = vfs_poll(req->file, &ipt->pt) & poll->events; spin_lock_irq(&ctx->completion_lock); @@ -4287,6 +4412,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; + bool had_io; if (!req->file || !file_can_poll(req->file)) return false; @@ -4301,6 +4427,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req) req->flags |= REQ_F_POLLED; memcpy(&apoll->work, &req->work, sizeof(req->work)); + had_io = req->io != NULL; get_task_struct(current); req->task = current; @@ -4320,7 +4447,9 @@ static bool io_arm_poll_handler(struct io_kiocb *req) io_async_wake); if (ret) { ipt.error = 0; - apoll->poll.done = true; + /* only remove double add if we did it here */ + if (!had_io) + io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); 
memcpy(&req->work, &apoll->work, sizeof(req->work)); kfree(apoll); @@ -4344,32 +4473,32 @@ static bool __io_poll_remove_one(struct io_kiocb *req, do_complete = true; } spin_unlock(&poll->head->lock); + hash_del(&req->hash_node); return do_complete; } static bool io_poll_remove_one(struct io_kiocb *req) { - struct async_poll *apoll = NULL; bool do_complete; if (req->opcode == IORING_OP_POLL_ADD) { + io_poll_remove_double(req); do_complete = __io_poll_remove_one(req, &req->poll); } else { - apoll = req->apoll; + struct async_poll *apoll = req->apoll; + /* non-poll requests have submit ref still */ - do_complete = __io_poll_remove_one(req, &req->apoll->poll); - if (do_complete) + do_complete = __io_poll_remove_one(req, &apoll->poll); + if (do_complete) { io_put_req(req); - } - - hash_del(&req->hash_node); - - if (do_complete && apoll) { - /* - * restore ->work because we need to call io_req_work_drop_env. - */ - memcpy(&req->work, &apoll->work, sizeof(req->work)); - kfree(apoll); + /* + * restore ->work because we will call + * io_req_work_drop_env below when dropping the + * final reference. + */ + memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll); + } } if (do_complete) { @@ -4454,49 +4583,6 @@ static int io_poll_remove(struct io_kiocb *req) return 0; } -static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) -{ - struct io_ring_ctx *ctx = req->ctx; - - req->poll.done = true; - io_cqring_fill_event(req, error ? error : mangle_poll(mask)); - io_commit_cqring(ctx); -} - -static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) -{ - struct io_ring_ctx *ctx = req->ctx; - struct io_poll_iocb *poll = &req->poll; - - if (io_poll_rewait(req, poll)) { - spin_unlock_irq(&ctx->completion_lock); - return; - } - - hash_del(&req->hash_node); - io_poll_complete(req, req->result, 0); - req->flags |= REQ_F_COMP_LOCKED; - io_put_req_find_next(req, nxt); - spin_unlock_irq(&ctx->completion_lock); - - io_cqring_ev_posted(ctx); -} - -static void io_poll_task_func(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_kiocb *nxt = NULL; - - io_poll_task_handler(req, &nxt); - if (nxt) { - struct io_ring_ctx *ctx = nxt->ctx; - - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(nxt, NULL); - mutex_unlock(&ctx->uring_lock); - } -} - static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { @@ -4576,20 +4662,8 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) * We could be racing with timeout deletion. If the list is empty, * then timeout lookup already found it and will be handling it. */ - if (!list_empty(&req->list)) { - struct io_kiocb *prev; - - /* - * Adjust the reqs sequence before the current one because it - * will consume a slot in the cq_ring and the cq_tail - * pointer will be increased, otherwise other timeout reqs may - * return in advance without waiting for enough wait_nr. 
- */ - prev = req; - list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list) - prev->sequence++; + if (!list_empty(&req->list)) list_del_init(&req->list); - } io_cqring_fill_event(req, -ETIME); io_commit_cqring(ctx); @@ -4669,18 +4743,19 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, { struct io_timeout_data *data; unsigned flags; + u32 off = READ_ONCE(sqe->off); if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->buf_index || sqe->len != 1) return -EINVAL; - if (sqe->off && is_timeout_link) + if (off && is_timeout_link) return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); if (flags & ~IORING_TIMEOUT_ABS) return -EINVAL; - req->timeout.count = READ_ONCE(sqe->off); + req->timeout.off = off; if (!req->io && io_alloc_async_ctx(req)) return -ENOMEM; @@ -4704,68 +4779,39 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_timeout(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_timeout_data *data; + struct io_timeout_data *data = &req->io->timeout; struct list_head *entry; - unsigned span = 0; - u32 count = req->timeout.count; - u32 seq = req->sequence; + u32 tail, off = req->timeout.off; - data = &req->io->timeout; + spin_lock_irq(&ctx->completion_lock); /* * sqe->off holds how many events that need to occur for this * timeout event to be satisfied. If it isn't set, then this is * a pure timeout request, sequence isn't used. */ - if (!count) { + if (!off) { req->flags |= REQ_F_TIMEOUT_NOSEQ; - spin_lock_irq(&ctx->completion_lock); entry = ctx->timeout_list.prev; goto add; } - req->sequence = seq + count; + tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); + req->timeout.target_seq = tail + off; /* * Insertion sort, ensuring the first entry in the list is always * the one we need first. */ - spin_lock_irq(&ctx->completion_lock); list_for_each_prev(entry, &ctx->timeout_list) { struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); - unsigned nxt_seq; - long long tmp, tmp_nxt; - u32 nxt_offset = nxt->timeout.count; if (nxt->flags & REQ_F_TIMEOUT_NOSEQ) continue; - - /* - * Since seq + count can overflow, use type long - * long to store it. - */ - tmp = (long long)seq + count; - nxt_seq = nxt->sequence - nxt_offset; - tmp_nxt = (long long)nxt_seq + nxt_offset; - - /* - * cached_sq_head may overflow, and it will never overflow twice - * once there is some timeout req still be valid. - */ - if (seq < nxt_seq) - tmp += UINT_MAX; - - if (tmp > tmp_nxt) + /* nxt.seq is behind @tail, otherwise would've been completed */ + if (off >= nxt->timeout.target_seq - tail) break; - - /* - * Sequence of reqs after the insert one and itself should - * be adjusted because each timeout req consumes a slot. 
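/*
 * Editor's note, not part of this patch: the unsigned subtraction in the new
 * comparison above ("off >= nxt->timeout.target_seq - tail") is what keeps
 * the insertion sort correct across u32 wraparound of the CQ tail, replacing
 * the old long-long arithmetic and per-entry sequence fixups. Worked example
 * with illustrative values:
 *
 *   tail                  = 0xfffffffe  (cached_cq_tail - cq_timeouts)
 *   existing target_seq   = 0x00000002  (i.e. 4 completions away)
 *   new request's off     = 3
 *
 *   nxt->timeout.target_seq - tail = 0x00000002 - 0xfffffffe = 4 (mod 2^32)
 *   off (3) >= 4 is false, so the scan continues and the new timeout is
 *   queued ahead of the existing one because it needs fewer completions
 *   to fire.
 */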
- */ - span++; - nxt->sequence++; } - req->sequence -= span; add: list_add(&req->list, entry); data->timer.function = io_timeout_fn; @@ -4994,6 +5040,9 @@ static int io_req_defer_prep(struct io_kiocb *req, case IORING_OP_REMOVE_BUFFERS: ret = io_remove_buffers_prep(req, sqe); break; + case IORING_OP_TEE: + ret = io_tee_prep(req, sqe); + break; default: printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", req->opcode); @@ -5064,10 +5113,9 @@ static void io_cleanup_req(struct io_kiocb *req) break; case IORING_OP_OPENAT: case IORING_OP_OPENAT2: - case IORING_OP_STATX: - putname(req->open.filename); break; case IORING_OP_SPLICE: + case IORING_OP_TEE: io_put_file(req, req->splice.file_in, (req->splice.flags & SPLICE_F_FD_IN_FIXED)); break; @@ -5298,6 +5346,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, } ret = io_remove_buffers(req, force_nonblock); break; + case IORING_OP_TEE: + if (sqe) { + ret = io_tee_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_tee(req, force_nonblock); + break; default: ret = -EINVAL; break; @@ -5367,7 +5423,7 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, struct fixed_file_table *table; table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT]; - return table->files[index & IORING_FILE_TABLE_MASK];; + return table->files[index & IORING_FILE_TABLE_MASK]; } static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, @@ -5403,7 +5459,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, bool fixed; fixed = (req->flags & REQ_F_FIXED_FILE) != 0; - if (unlikely(!fixed && req->needs_fixed_file)) + if (unlikely(!fixed && io_async_submit(req->ctx))) return -EBADF; return io_file_get(state, req, fd, &req->file, fixed); @@ -5638,7 +5694,7 @@ static inline void io_queue_link_head(struct io_kiocb *req) } static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_submit_state *state, struct io_kiocb **link) + struct io_kiocb **link) { struct io_ring_ctx *ctx = req->ctx; int ret; @@ -5711,7 +5767,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, static void io_submit_state_end(struct io_submit_state *state) { blk_finish_plug(&state->plug); - io_file_put(state); + io_state_file_put(state); if (state->free_reqs) kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); } @@ -5782,7 +5838,7 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx) static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_submit_state *state, bool async) + struct io_submit_state *state) { unsigned int sqe_flags; int id; @@ -5803,7 +5859,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, refcount_set(&req->refs, 2); req->task = NULL; req->result = 0; - req->needs_fixed_file = async; INIT_IO_WORK(&req->work, io_wq_submit_work); if (unlikely(req->opcode >= IORING_OP_LAST)) @@ -5833,9 +5888,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, } /* same numerical values with corresponding REQ_F_*, safe to copy */ - req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK | - IOSQE_ASYNC | IOSQE_FIXED_FILE | - IOSQE_BUFFER_SELECT | IOSQE_IO_LINK); + req->flags |= sqe_flags; if (!io_op_defs[req->opcode].needs_file) return 0; @@ -5844,7 +5897,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, } static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, - struct file 
*ring_file, int ring_fd, bool async) + struct file *ring_file, int ring_fd) { struct io_submit_state state, *statep = NULL; struct io_kiocb *link = NULL; @@ -5888,7 +5941,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, break; } - err = io_init_req(ctx, req, sqe, statep, async); + err = io_init_req(ctx, req, sqe, statep); io_consume_sqe(ctx); /* will complete beyond this point, count as submitted */ submitted++; @@ -5901,8 +5954,8 @@ fail_req: } trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, - true, async); - err = io_submit_sqe(req, sqe, statep, &link); + true, io_async_submit(ctx)); + err = io_submit_sqe(req, sqe, &link); if (err) goto fail_req; } @@ -5942,7 +5995,7 @@ static int io_sq_thread(void *data) unsigned long timeout; int ret = 0; - complete(&ctx->completions[1]); + complete(&ctx->sq_thread_comp); old_fs = get_fs(); set_fs(USER_DS); @@ -6041,7 +6094,8 @@ static int io_sq_thread(void *data) } mutex_lock(&ctx->uring_lock); - ret = io_submit_sqes(ctx, to_submit, NULL, -1, true); + if (likely(!percpu_ref_is_dying(&ctx->refs))) + ret = io_submit_sqes(ctx, to_submit, NULL, -1); mutex_unlock(&ctx->uring_lock); timeout = jiffies + ctx->sq_thread_idle; } @@ -6189,22 +6243,22 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) struct fixed_file_data *data = ctx->file_data; struct fixed_file_ref_node *ref_node = NULL; unsigned nr_tables, i; - unsigned long flags; if (!data) return -ENXIO; - spin_lock_irqsave(&data->lock, flags); + spin_lock(&data->lock); if (!list_empty(&data->ref_list)) ref_node = list_first_entry(&data->ref_list, struct fixed_file_ref_node, node); - spin_unlock_irqrestore(&data->lock, flags); + spin_unlock(&data->lock); if (ref_node) percpu_ref_kill(&ref_node->refs); percpu_ref_kill(&data->refs); /* wait for all refs nodes to complete */ + flush_delayed_work(&ctx->file_put_work); wait_for_completion(&data->done); __io_sqe_files_unregister(ctx); @@ -6222,7 +6276,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) static void io_sq_thread_stop(struct io_ring_ctx *ctx) { if (ctx->sqo_thread) { - wait_for_completion(&ctx->completions[1]); + wait_for_completion(&ctx->sq_thread_comp); /* * The park is a bit of a work-around, without it we get * warning spews on shutdown with SQPOLL set and affinity @@ -6435,40 +6489,63 @@ struct io_file_put { struct file *file; }; -static void io_file_put_work(struct work_struct *work) +static void __io_file_put_work(struct fixed_file_ref_node *ref_node) { - struct fixed_file_ref_node *ref_node; - struct fixed_file_data *file_data; - struct io_ring_ctx *ctx; + struct fixed_file_data *file_data = ref_node->file_data; + struct io_ring_ctx *ctx = file_data->ctx; struct io_file_put *pfile, *tmp; - unsigned long flags; - - ref_node = container_of(work, struct fixed_file_ref_node, work); - file_data = ref_node->file_data; - ctx = file_data->ctx; list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) { - list_del_init(&pfile->list); + list_del(&pfile->list); io_ring_file_put(ctx, pfile->file); kfree(pfile); } - spin_lock_irqsave(&file_data->lock, flags); - list_del_init(&ref_node->node); - spin_unlock_irqrestore(&file_data->lock, flags); + spin_lock(&file_data->lock); + list_del(&ref_node->node); + spin_unlock(&file_data->lock); percpu_ref_exit(&ref_node->refs); kfree(ref_node); percpu_ref_put(&file_data->refs); } +static void io_file_put_work(struct work_struct *work) +{ + struct io_ring_ctx *ctx; + struct llist_node *node; + + ctx = container_of(work, struct io_ring_ctx, 
file_put_work.work); + node = llist_del_all(&ctx->file_put_llist); + + while (node) { + struct fixed_file_ref_node *ref_node; + struct llist_node *next = node->next; + + ref_node = llist_entry(node, struct fixed_file_ref_node, llist); + __io_file_put_work(ref_node); + node = next; + } +} + static void io_file_data_ref_zero(struct percpu_ref *ref) { struct fixed_file_ref_node *ref_node; + struct io_ring_ctx *ctx; + bool first_add; + int delay = HZ; ref_node = container_of(ref, struct fixed_file_ref_node, refs); + ctx = ref_node->file_data->ctx; + + if (percpu_ref_is_dying(&ctx->file_data->refs)) + delay = 0; - queue_work(system_wq, &ref_node->work); + first_add = llist_add(&ref_node->llist, &ctx->file_put_llist); + if (!delay) + mod_delayed_work(system_wq, &ctx->file_put_work, 0); + else if (first_add) + queue_delayed_work(system_wq, &ctx->file_put_work, delay); } static struct fixed_file_ref_node *alloc_fixed_file_ref_node( @@ -6487,10 +6564,8 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node( } INIT_LIST_HEAD(&ref_node->node); INIT_LIST_HEAD(&ref_node->file_list); - INIT_WORK(&ref_node->work, io_file_put_work); ref_node->file_data = ctx->file_data; return ref_node; - } static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node) @@ -6508,7 +6583,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, int fd, ret = 0; unsigned i; struct fixed_file_ref_node *ref_node; - unsigned long flags; if (ctx->file_data) return -EBUSY; @@ -6616,9 +6690,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, } ctx->file_data->cur_refs = &ref_node->refs; - spin_lock_irqsave(&ctx->file_data->lock, flags); + spin_lock(&ctx->file_data->lock); list_add(&ref_node->node, &ctx->file_data->ref_list); - spin_unlock_irqrestore(&ctx->file_data->lock, flags); + spin_unlock(&ctx->file_data->lock); percpu_ref_get(&ctx->file_data->refs); return ret; } @@ -6694,7 +6768,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, __s32 __user *fds; int fd, i, err; __u32 done; - unsigned long flags; bool needs_switch = false; if (check_add_overflow(up->offset, nr_args, &done)) @@ -6759,10 +6832,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, if (needs_switch) { percpu_ref_kill(data->cur_refs); - spin_lock_irqsave(&data->lock, flags); + spin_lock(&data->lock); list_add(&ref_node->node, &data->ref_list); data->cur_refs = &ref_node->refs; - spin_unlock_irqrestore(&data->lock, flags); + spin_unlock(&data->lock); percpu_ref_get(&ctx->file_data->refs); } else destroy_fixed_file_ref_node(ref_node); @@ -7250,7 +7323,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) ring_pages(ctx->sq_entries, ctx->cq_entries)); free_uid(ctx->user); put_cred(ctx->creds); - kfree(ctx->completions); kfree(ctx->cancel_hash); kmem_cache_free(req_cachep, ctx->fallback_req); kfree(ctx); @@ -7302,7 +7374,7 @@ static void io_ring_exit_work(struct work_struct *work) if (ctx->rings) io_cqring_overflow_flush(ctx, true); - wait_for_completion(&ctx->completions[0]); + wait_for_completion(&ctx->ref_comp); io_ring_ctx_free(ctx); } @@ -7312,16 +7384,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) percpu_ref_kill(&ctx->refs); mutex_unlock(&ctx->uring_lock); - /* - * Wait for sq thread to idle, if we have one. It won't spin on new - * work after we've killed the ctx ref above. This is important to do - * before we cancel existing commands, as the thread could otherwise - * be queueing new work post that. 
If that's work we need to cancel, - * it could cause shutdown to hang. - */ - while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait)) - cond_resched(); - io_kill_timeouts(ctx); io_poll_remove_all(ctx); @@ -7390,14 +7452,15 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, * all we had, then we're done with this request. */ if (refcount_sub_and_test(2, &cancel_req->refs)) { - io_put_req(cancel_req); + io_free_req(cancel_req); finish_wait(&ctx->inflight_wait, &wait); continue; } + } else { + io_wq_cancel_work(ctx->io_wq, &cancel_req->work); + io_put_req(cancel_req); } - io_wq_cancel_work(ctx->io_wq, &cancel_req->work); - io_put_req(cancel_req); schedule(); finish_wait(&ctx->inflight_wait, &wait); } @@ -7530,7 +7593,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, submitted = to_submit; } else if (to_submit) { mutex_lock(&ctx->uring_lock); - submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false); + submitted = io_submit_sqes(ctx, to_submit, f.file, fd); mutex_unlock(&ctx->uring_lock); if (submitted != to_submit) @@ -7841,6 +7904,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); p->cq_off.cqes = offsetof(struct io_rings, cqes); + p->cq_off.flags = offsetof(struct io_rings, cq_flags); p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | @@ -8001,7 +8065,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, * after we've killed the percpu ref. */ mutex_unlock(&ctx->uring_lock); - ret = wait_for_completion_interruptible(&ctx->completions[0]); + ret = wait_for_completion_interruptible(&ctx->ref_comp); mutex_lock(&ctx->uring_lock); if (ret) { percpu_ref_resurrect(&ctx->refs); @@ -8078,7 +8142,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, /* bring the ctx back to life */ percpu_ref_reinit(&ctx->refs); out: - reinit_completion(&ctx->completions[0]); + reinit_completion(&ctx->ref_comp); } return ret; } diff --git a/fs/ioctl.c b/fs/ioctl.c index 5e80b40bc1b5..d69786d1dd91 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -18,6 +18,7 @@ #include <linux/buffer_head.h> #include <linux/falloc.h> #include <linux/sched/signal.h> +#include <linux/fiemap.h> #include "internal.h" @@ -148,61 +149,55 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical, EXPORT_SYMBOL(fiemap_fill_next_extent); /** - * fiemap_check_flags - check validity of requested flags for fiemap + * fiemap_prep - check validity of requested flags for fiemap + * @inode: Inode to operate on * @fieinfo: Fiemap context passed into ->fiemap - * @fs_flags: Set of fiemap flags that the file system understands + * @start: Start of the mapped range + * @len: Length of the mapped range, can be truncated by this function. + * @supported_flags: Set of fiemap flags that the file system understands * - * Called from file system ->fiemap callback. This will compute the - * intersection of valid fiemap flags and those that the fs supports. That - * value is then compared against the user supplied flags. In case of bad user - * flags, the invalid values will be written into the fieinfo structure, and - * -EBADR is returned, which tells ioctl_fiemap() to return those values to - * userspace. For this reason, a return code of -EBADR should be preserved. 
+ * This function must be called from each ->fiemap instance to validate the + * fiemap request against the file system parameters. * - * Returns 0 on success, -EBADR on bad flags. + * Returns 0 on success, or a negative error on failure. */ -int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags) +int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 *len, u32 supported_flags) { + u64 maxbytes = inode->i_sb->s_maxbytes; u32 incompat_flags; + int ret = 0; - incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags); - if (incompat_flags) { - fieinfo->fi_flags = incompat_flags; - return -EBADR; - } - return 0; -} -EXPORT_SYMBOL(fiemap_check_flags); - -static int fiemap_check_ranges(struct super_block *sb, - u64 start, u64 len, u64 *new_len) -{ - u64 maxbytes = (u64) sb->s_maxbytes; - - *new_len = len; - - if (len == 0) + if (*len == 0) return -EINVAL; - if (start > maxbytes) return -EFBIG; /* * Shrink request scope to what the fs can actually handle. */ - if (len > maxbytes || (maxbytes - len) < start) - *new_len = maxbytes - start; + if (*len > maxbytes || (maxbytes - *len) < start) + *len = maxbytes - start; + + supported_flags |= FIEMAP_FLAG_SYNC; + supported_flags &= FIEMAP_FLAGS_COMPAT; + incompat_flags = fieinfo->fi_flags & ~supported_flags; + if (incompat_flags) { + fieinfo->fi_flags = incompat_flags; + return -EBADR; + } - return 0; + if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) + ret = filemap_write_and_wait(inode->i_mapping); + return ret; } +EXPORT_SYMBOL(fiemap_prep); static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap) { struct fiemap fiemap; struct fiemap_extent_info fieinfo = { 0, }; struct inode *inode = file_inode(filp); - struct super_block *sb = inode->i_sb; - u64 len; int error; if (!inode->i_op->fiemap) @@ -214,24 +209,13 @@ static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap) if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS) return -EINVAL; - error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length, - &len); - if (error) - return error; - fieinfo.fi_flags = fiemap.fm_flags; fieinfo.fi_extents_max = fiemap.fm_extent_count; fieinfo.fi_extents_start = ufiemap->fm_extents; - if (fiemap.fm_extent_count != 0 && - !access_ok(fieinfo.fi_extents_start, - fieinfo.fi_extents_max * sizeof(struct fiemap_extent))) - return -EFAULT; - - if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC) - filemap_write_and_wait(inode->i_mapping); + error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, + fiemap.fm_length); - error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len); fiemap.fm_flags = fieinfo.fi_flags; fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped; if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap))) @@ -307,8 +291,7 @@ static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) * If you use this function directly, you need to do your own locking. Use * generic_block_fiemap if you want the locking done for you. 
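/*
 * Editor's illustration, not part of this patch: per the kernel-doc above,
 * every ->fiemap instance now starts by calling fiemap_prep(), which checks
 * the user-supplied flags, clamps the length to s_maxbytes and performs the
 * FIEMAP_FLAG_SYNC writeback itself. A minimal hypothetical implementation
 * ("myfs_fiemap" is an illustrative name) could look like this:
 */
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	int ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/*
	 * ... walk the block mapping for [start, start + len) and report
	 * each extent with fiemap_fill_next_extent() ...
	 */
	return 0;
}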
*/ - -int __generic_block_fiemap(struct inode *inode, +static int __generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, get_block_t *get_block) { @@ -320,7 +303,7 @@ int __generic_block_fiemap(struct inode *inode, bool past_eof = false, whole_file = false; int ret = 0; - ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); + ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC); if (ret) return ret; @@ -453,7 +436,6 @@ int __generic_block_fiemap(struct inode *inode, return ret; } -EXPORT_SYMBOL(__generic_block_fiemap); /** * generic_block_fiemap - FIEMAP for block based inodes diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 89e21961d1ad..a1ed7620fbac 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -59,24 +59,19 @@ iomap_page_create(struct inode *inode, struct page *page) * migrate_page_move_mapping() assumes that pages with private data have * their count elevated by 1. */ - get_page(page); - set_page_private(page, (unsigned long)iop); - SetPagePrivate(page); + attach_page_private(page, iop); return iop; } static void iomap_page_release(struct page *page) { - struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = detach_page_private(page); if (!iop) return; WARN_ON_ONCE(atomic_read(&iop->read_count)); WARN_ON_ONCE(atomic_read(&iop->write_count)); - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); kfree(iop); } @@ -214,9 +209,8 @@ iomap_read_end_io(struct bio *bio) struct iomap_readpage_ctx { struct page *cur_page; bool cur_page_in_bio; - bool is_readahead; struct bio *bio; - struct list_head *pages; + struct readahead_control *rac; }; static void @@ -308,7 +302,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, if (ctx->bio) submit_bio(ctx->bio); - if (ctx->is_readahead) /* same as readahead_gfp_mask */ + if (ctx->rac) /* same as readahead_gfp_mask */ gfp |= __GFP_NORETRY | __GFP_NOWARN; ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); /* @@ -319,7 +313,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, if (!ctx->bio) ctx->bio = bio_alloc(orig_gfp, 1); ctx->bio->bi_opf = REQ_OP_READ; - if (ctx->is_readahead) + if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; bio_set_dev(ctx->bio, iomap->bdev); @@ -367,7 +361,7 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops) } /* - * Just like mpage_readpages and block_read_full_page we always + * Just like mpage_readahead and block_read_full_page we always * return 0 and just mark the page as PageError on errors. This * should be cleaned up all through the stack eventually. */ @@ -375,36 +369,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops) } EXPORT_SYMBOL_GPL(iomap_readpage); -static struct page * -iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, - loff_t length, loff_t *done) -{ - while (!list_empty(pages)) { - struct page *page = lru_to_page(pages); - - if (page_offset(page) >= (u64)pos + length) - break; - - list_del(&page->lru); - if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, - GFP_NOFS)) - return page; - - /* - * If we already have a page in the page cache at index we are - * done. Upper layers don't care if it is uptodate after the - * readpages call itself as every page gets checked again once - * actually needed. 
- */ - *done += PAGE_SIZE; - put_page(page); - } - - return NULL; -} - static loff_t -iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, +iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct iomap *iomap, struct iomap *srcmap) { struct iomap_readpage_ctx *ctx = data; @@ -418,10 +384,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, ctx->cur_page = NULL; } if (!ctx->cur_page) { - ctx->cur_page = iomap_next_page(inode, ctx->pages, - pos, length, &done); - if (!ctx->cur_page) - break; + ctx->cur_page = readahead_page(ctx->rac); ctx->cur_page_in_bio = false; } ret = iomap_readpage_actor(inode, pos + done, length - done, @@ -431,32 +394,43 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, return done; } -int -iomap_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, const struct iomap_ops *ops) +/** + * iomap_readahead - Attempt to read pages from a file. + * @rac: Describes the pages to be read. + * @ops: The operations vector for the filesystem. + * + * This function is for filesystems to call to implement their readahead + * address_space operation. + * + * Context: The @ops callbacks may submit I/O (eg to read the addresses of + * blocks from disc), and may wait for it. The caller may be trying to + * access a different page, and so sleeping excessively should be avoided. + * It may allocate memory, but should avoid costly allocations. This + * function is called with memalloc_nofs set, so allocations will not cause + * the filesystem to be reentered. + */ +void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) { + struct inode *inode = rac->mapping->host; + loff_t pos = readahead_pos(rac); + loff_t length = readahead_length(rac); struct iomap_readpage_ctx ctx = { - .pages = pages, - .is_readahead = true, + .rac = rac, }; - loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); - loff_t last = page_offset(list_entry(pages->next, struct page, lru)); - loff_t length = last - pos + PAGE_SIZE, ret = 0; - trace_iomap_readpages(mapping->host, nr_pages); + trace_iomap_readahead(inode, readahead_count(rac)); while (length > 0) { - ret = iomap_apply(mapping->host, pos, length, 0, ops, - &ctx, iomap_readpages_actor); + loff_t ret = iomap_apply(inode, pos, length, 0, ops, + &ctx, iomap_readahead_actor); if (ret <= 0) { WARN_ON_ONCE(ret == 0); - goto done; + break; } pos += ret; length -= ret; } - ret = 0; -done: + if (ctx.bio) submit_bio(ctx.bio); if (ctx.cur_page) { @@ -464,15 +438,8 @@ done: unlock_page(ctx.cur_page); put_page(ctx.cur_page); } - - /* - * Check that we didn't lose a page due to the arcance calling - * conventions.. 
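/*
 * Editor's illustration, not part of this patch: an iomap-based filesystem
 * converts from ->readpages to the new ->readahead hook simply by handing
 * the readahead_control to iomap_readahead(). "myfs_iomap_ops" and
 * "myfs_readahead" are hypothetical names; the iomap_ops instance is assumed
 * to carry the filesystem's real iomap_begin/iomap_end callbacks.
 */
static const struct iomap_ops myfs_iomap_ops = {
	/* .iomap_begin / .iomap_end supplied by the filesystem */
};

static void myfs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &myfs_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
	.readahead	= myfs_readahead,
	/* other address_space operations omitted for brevity */
};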
- */ - WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); - return ret; } -EXPORT_SYMBOL_GPL(iomap_readpages); +EXPORT_SYMBOL_GPL(iomap_readahead); /* * iomap_is_partially_uptodate checks whether blocks within a page are @@ -554,14 +521,8 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage, if (ret != MIGRATEPAGE_SUCCESS) return ret; - if (page_has_private(page)) { - ClearPagePrivate(page); - get_page(newpage); - set_page_private(newpage, page_private(page)); - set_page_private(page, 0); - put_page(page); - SetPagePrivate(newpage); - } + if (page_has_private(page)) + attach_page_private(newpage, detach_page_private(page)); if (mode != MIGRATE_SYNC_NO_COPY) migrate_page_copy(newpage, page); diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 20dde5aadcdd..ec7b78e6feca 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -59,7 +59,7 @@ int iomap_dio_iopoll(struct kiocb *kiocb, bool spin) EXPORT_SYMBOL_GPL(iomap_dio_iopoll); static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap, - struct bio *bio) + struct bio *bio, loff_t pos) { atomic_inc(&dio->ref); @@ -67,7 +67,12 @@ static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap, bio_set_polled(bio, dio->iocb); dio->submit.last_queue = bdev_get_queue(iomap->bdev); - dio->submit.cookie = submit_bio(bio); + if (dio->dops && dio->dops->submit_io) + dio->submit.cookie = dio->dops->submit_io( + file_inode(dio->iocb->ki_filp), + iomap, bio, pos); + else + dio->submit.cookie = submit_bio(bio); } static ssize_t iomap_dio_complete(struct iomap_dio *dio) @@ -191,7 +196,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, get_page(page); __bio_add_page(bio, page, len, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, flags); - iomap_dio_submit_bio(dio, iomap, bio); + iomap_dio_submit_bio(dio, iomap, bio, pos); } static loff_t @@ -299,11 +304,11 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, } dio->size += n; - pos += n; copied += n; nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES); - iomap_dio_submit_bio(dio, iomap, bio); + iomap_dio_submit_bio(dio, iomap, bio, pos); + pos += n; } while (nr_pages); /* @@ -411,8 +416,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, struct blk_plug plug; struct iomap_dio *dio; - lockdep_assert_held(&inode->i_rwsem); - if (!count) return 0; @@ -561,7 +564,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, !dio->submit.last_queue || !blk_poll(dio->submit.last_queue, dio->submit.cookie, true)) - io_schedule(); + blk_io_schedule(); } __set_current_state(TASK_RUNNING); } diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c index d55e8f491a5e..aab070df4a21 100644 --- a/fs/iomap/fiemap.c +++ b/fs/iomap/fiemap.c @@ -6,6 +6,7 @@ #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> +#include <linux/fiemap.h> struct fiemap_ctx { struct fiemap_extent_info *fi; @@ -65,7 +66,7 @@ iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, } int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, - loff_t start, loff_t len, const struct iomap_ops *ops) + u64 start, u64 len, const struct iomap_ops *ops) { struct fiemap_ctx ctx; loff_t ret; @@ -74,16 +75,10 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, ctx.fi = fi; ctx.prev.type = IOMAP_HOLE; - ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC); + ret = fiemap_prep(inode, fi, start, &len, 0); if (ret) return ret; - if (fi->fi_flags & FIEMAP_FLAG_SYNC) { - ret = 
filemap_write_and_wait(inode->i_mapping); - if (ret) - return ret; - } - while (len > 0) { ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx, iomap_fiemap_actor); diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index 4df19c66f597..5693a39d52fb 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -39,7 +39,7 @@ DEFINE_EVENT(iomap_readpage_class, name, \ TP_PROTO(struct inode *inode, int nr_pages), \ TP_ARGS(inode, nr_pages)) DEFINE_READPAGE_EVENT(iomap_readpage); -DEFINE_READPAGE_EVENT(iomap_readpages); +DEFINE_READPAGE_EVENT(iomap_readahead); DECLARE_EVENT_CLASS(iomap_range_class, TP_PROTO(struct inode *inode, unsigned long off, unsigned int len), diff --git a/fs/isofs/Kconfig b/fs/isofs/Kconfig index 5e7419599f50..08ffd37b9bb8 100644 --- a/fs/isofs/Kconfig +++ b/fs/isofs/Kconfig @@ -8,7 +8,7 @@ config ISO9660_FS long Unix filenames and symbolic links are also supported by this driver. If you have a CD-ROM drive and want to do more with it than just listen to audio CDs and watch its LEDs, say Y (and read - <file:Documentation/filesystems/isofs.txt> and the CD-ROM-HOWTO, + <file:Documentation/filesystems/isofs.rst> and the CD-ROM-HOWTO, available from <http://www.tldp.org/docs.html#howto>), thereby enlarging your kernel by about 27 KB; otherwise say N. diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 62c0462dc89f..d634561f871a 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -544,43 +544,41 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root) static unsigned int isofs_get_last_session(struct super_block *sb, s32 session) { - struct cdrom_multisession ms_info; - unsigned int vol_desc_start; - struct block_device *bdev = sb->s_bdev; - int i; + struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); + unsigned int vol_desc_start = 0; - vol_desc_start=0; - ms_info.addr_format=CDROM_LBA; if (session > 0) { - struct cdrom_tocentry Te; - Te.cdte_track=session; - Te.cdte_format=CDROM_LBA; - i = ioctl_by_bdev(bdev, CDROMREADTOCENTRY, (unsigned long) &Te); - if (!i) { + struct cdrom_tocentry te; + + if (!cdi) + return 0; + + te.cdte_track = session; + te.cdte_format = CDROM_LBA; + if (cdrom_read_tocentry(cdi, &te) == 0) { printk(KERN_DEBUG "ISOFS: Session %d start %d type %d\n", - session, Te.cdte_addr.lba, - Te.cdte_ctrl&CDROM_DATA_TRACK); - if ((Te.cdte_ctrl&CDROM_DATA_TRACK) == 4) - return Te.cdte_addr.lba; + session, te.cdte_addr.lba, + te.cdte_ctrl & CDROM_DATA_TRACK); + if ((te.cdte_ctrl & CDROM_DATA_TRACK) == 4) + return te.cdte_addr.lba; } printk(KERN_ERR "ISOFS: Invalid session number or type of track\n"); } - i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info); - if (session > 0) - printk(KERN_ERR "ISOFS: Invalid session number\n"); -#if 0 - printk(KERN_DEBUG "isofs.inode: CDROMMULTISESSION: rc=%d\n",i); - if (i==0) { - printk(KERN_DEBUG "isofs.inode: XA disk: %s\n",ms_info.xa_flag?"yes":"no"); - printk(KERN_DEBUG "isofs.inode: vol_desc_start = %d\n", ms_info.addr.lba); - } -#endif - if (i==0) + + if (cdi) { + struct cdrom_multisession ms_info; + + ms_info.addr_format = CDROM_LBA; + if (cdrom_multisession(cdi, &ms_info) == 0) { #if WE_OBEY_THE_WRITTEN_STANDARDS - if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */ + /* necessary for a valid ms_info.addr */ + if (ms_info.xa_flag) #endif - vol_desc_start=ms_info.addr.lba; + vol_desc_start = ms_info.addr.lba; + } + } + return vol_desc_start; } @@ -1185,10 +1183,9 @@ static int isofs_readpage(struct file *file, struct page *page) return mpage_readpage(page, 
isofs_get_block); } -static int isofs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void isofs_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, isofs_get_block); + mpage_readahead(rac, isofs_get_block); } static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) @@ -1198,7 +1195,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations isofs_aops = { .readpage = isofs_readpage, - .readpages = isofs_readpages, + .readahead = isofs_readahead, .bmap = _isofs_bmap }; diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 96bf33986d03..263f02ad8ebf 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -414,7 +414,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal) * jbd2_cleanup_journal_tail() doesn't get called all that often. */ if (journal->j_flags & JBD2_BARRIER) - blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL); + blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS); return __jbd2_update_log_tail(journal, first_tid, blocknr); } diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index e855d8260433..6d2da8ad0e6f 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -775,7 +775,7 @@ start_journal_io: if (commit_transaction->t_need_data_flush && (journal->j_fs_dev != journal->j_dev) && (journal->j_flags & JBD2_BARRIER)) - blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL); + blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS); /* Done it all: now write the commit record asynchronously. */ if (jbd2_has_feature_async_commit(journal)) { @@ -882,7 +882,7 @@ start_journal_io: stats.run.rs_blocks_logged++; if (jbd2_has_feature_async_commit(journal) && journal->j_flags & JBD2_BARRIER) { - blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL); + blkdev_issue_flush(journal->j_dev, GFP_NOFS); } if (err) diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index a4967b27ffb6..2ed278f0dced 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -286,7 +286,7 @@ int jbd2_journal_recover(journal_t *journal) err = err2; /* Make sure all replayed data is on permanent storage */ if (journal->j_flags & JBD2_BARRIER) { - err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); + err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL); if (!err) err = err2; } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 3dccc23cf010..e91aad3637a2 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -541,17 +541,24 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) } EXPORT_SYMBOL(jbd2_journal_start); -static void __jbd2_journal_unreserve_handle(handle_t *handle) +static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t) { journal_t *journal = handle->h_journal; WARN_ON(!handle->h_reserved); sub_reserved_credits(journal, handle->h_total_credits); + if (t) + atomic_sub(handle->h_total_credits, &t->t_outstanding_credits); } void jbd2_journal_free_reserved(handle_t *handle) { - __jbd2_journal_unreserve_handle(handle); + journal_t *journal = handle->h_journal; + + /* Get j_state_lock to pin running transaction if it exists */ + read_lock(&journal->j_state_lock); + __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction); + read_unlock(&journal->j_state_lock); jbd2_free_handle(handle); } EXPORT_SYMBOL(jbd2_journal_free_reserved); @@ -722,7 +729,8 @@ static void stop_this_handle(handle_t *handle) atomic_sub(handle->h_total_credits, 
&transaction->t_outstanding_credits); if (handle->h_rsv_handle) - __jbd2_journal_unreserve_handle(handle->h_rsv_handle); + __jbd2_journal_unreserve_handle(handle->h_rsv_handle, + transaction); if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9486afcdac76..6f65bfa9f18d 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -296,10 +296,9 @@ static int jfs_readpage(struct file *file, struct page *page) return mpage_readpage(page, jfs_get_block); } -static int jfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void jfs_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); + mpage_readahead(rac, jfs_get_block); } static void jfs_write_failed(struct address_space *mapping, loff_t to) @@ -358,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations jfs_aops = { .readpage = jfs_readpage, - .readpages = jfs_readpages, + .readahead = jfs_readahead, .writepage = jfs_writepage, .writepages = jfs_writepages, .write_begin = jfs_write_begin, diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 3acc954f7c04..837d42f61464 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -2964,7 +2964,7 @@ struct jfs_dirent { loff_t position; int ino; u16 name_len; - char name[0]; + char name[]; }; /* diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h index f0558b3348da..c50167a7bc50 100644 --- a/fs/jfs/jfs_xattr.h +++ b/fs/jfs/jfs_xattr.h @@ -17,12 +17,12 @@ struct jfs_ea { u8 flag; /* Unused? */ u8 namelen; /* Length of name */ __le16 valuelen; /* Length of value */ - char name[0]; /* Attribute name (includes null-terminator) */ + char name[]; /* Attribute name (includes null-terminator) */ }; /* Value immediately follows name */ struct jfs_ea_list { __le32 size; /* overall size */ - struct jfs_ea ea[0]; /* Variable length list */ + struct jfs_ea ea[]; /* Variable length list */ }; /* Macros for defining maxiumum number of bytes supported for EAs */ diff --git a/fs/libfs.c b/fs/libfs.c index 3759fbacf522..4d08edf19c78 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1113,7 +1113,7 @@ int generic_file_fsync(struct file *file, loff_t start, loff_t end, err = __generic_file_fsync(file, start, end, datasync); if (err) return err; - return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); } EXPORT_SYMBOL(generic_file_fsync); diff --git a/fs/locks.c b/fs/locks.c index b8a31c1c4fff..6fd1f6e83178 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -61,7 +61,7 @@ * * Initial implementation of mandatory locks. SunOS turned out to be * a rotten model, so I implemented the "obvious" semantics. - * See 'Documentation/filesystems/mandatory-locking.txt' for details. + * See 'Documentation/filesystems/mandatory-locking.rst' for details. * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996. * * Don't allow mandatory locks on mmap()'ed files. 
Added simple functions to @@ -2823,7 +2823,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, { struct inode *inode = NULL; unsigned int fl_pid; - struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info; + struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); fl_pid = locks_translate_pid(fl, proc_pidns); /* @@ -2901,7 +2901,7 @@ static int locks_show(struct seq_file *f, void *v) { struct locks_iterator *iter = f->private; struct file_lock *fl, *bfl; - struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info; + struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb); fl = hlist_entry(v, struct file_lock, fl_link); diff --git a/fs/mount.h b/fs/mount.h index 711a4093e475..c7abb7b394d8 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -9,7 +9,13 @@ struct mnt_namespace { atomic_t count; struct ns_common ns; struct mount * root; + /* + * Traversal and modification of .list is protected by either + * - taking namespace_sem for write, OR + * - taking namespace_sem for read AND taking .ns_lock. + */ struct list_head list; + spinlock_t ns_lock; struct user_namespace *user_ns; struct ucounts *ucounts; u64 seq; /* Sequence number to prevent loops */ @@ -133,9 +139,7 @@ struct proc_mounts { struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); - void *cached_mount; - u64 cached_event; - loff_t cached_index; + struct mount cursor; }; extern const struct seq_operations mounts_op; @@ -153,3 +157,5 @@ static inline bool is_anon_ns(struct mnt_namespace *ns) { return ns->seq == 0; } + +extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor); diff --git a/fs/mpage.c b/fs/mpage.c index ccba3c4c4479..830e6cc2a9e7 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -91,7 +91,7 @@ mpage_alloc(struct block_device *bdev, } /* - * support function for mpage_readpages. The fs supplied get_block might + * support function for mpage_readahead. The fs supplied get_block might * return an up to date buffer. This is used to map that buffer into * the page, which allows readpage to avoid triggering a duplicate call * to get_block. @@ -338,13 +338,8 @@ confused: } /** - * mpage_readpages - populate an address space with some pages & start reads against them - * @mapping: the address_space - * @pages: The address of a list_head which contains the target pages. These - * pages have their ->index populated and are otherwise uninitialised. - * The page at @pages->prev has the lowest file offset, and reads should be - * issued in @pages->prev to @pages->next order. - * @nr_pages: The number of pages at *@pages + * mpage_readahead - start reads against pages + * @rac: Describes which pages to read. * @get_block: The filesystem's block mapper function. * * This function walks the pages and the blocks within each page, building and @@ -381,36 +376,25 @@ confused: * * This all causes the disk requests to be issued in the correct order. 
*/ -int -mpage_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, get_block_t get_block) +void mpage_readahead(struct readahead_control *rac, get_block_t get_block) { + struct page *page; struct mpage_readpage_args args = { .get_block = get_block, .is_readahead = true, }; - unsigned page_idx; - - for (page_idx = 0; page_idx < nr_pages; page_idx++) { - struct page *page = lru_to_page(pages); + while ((page = readahead_page(rac))) { prefetchw(&page->flags); - list_del(&page->lru); - if (!add_to_page_cache_lru(page, mapping, - page->index, - readahead_gfp_mask(mapping))) { - args.page = page; - args.nr_pages = nr_pages - page_idx; - args.bio = do_mpage_readpage(&args); - } + args.page = page; + args.nr_pages = readahead_count(rac); + args.bio = do_mpage_readpage(&args); put_page(page); } - BUG_ON(!list_empty(pages)); if (args.bio) mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio); - return 0; } -EXPORT_SYMBOL(mpage_readpages); +EXPORT_SYMBOL(mpage_readahead); /* * This isn't called much at all @@ -563,7 +547,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, * Page has buffers, but they are all unmapped. The page was * created by pagein or read over a hole which was handled by * block_read_full_page(). If this address_space is also - * using mpage_readpages then this can rarely happen. + * using mpage_readahead then this can rarely happen. */ goto confused; } diff --git a/fs/namei.c b/fs/namei.c index a320371899cf..d81f73ff1a8b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3505,12 +3505,14 @@ EXPORT_SYMBOL(user_path_create); int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { + bool is_whiteout = S_ISCHR(mode) && dev == WHITEOUT_DEV; int error = may_create(dir, dentry); if (error) return error; - if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) + if ((S_ISCHR(mode) || S_ISBLK(mode)) && !is_whiteout && + !capable(CAP_MKNOD)) return -EPERM; if (!dir->i_op->mknod) @@ -4345,9 +4347,6 @@ static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, (flags & RENAME_EXCHANGE)) return -EINVAL; - if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) - return -EPERM; - if (flags & RENAME_EXCHANGE) target_flags = 0; @@ -4483,20 +4482,6 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } -int vfs_whiteout(struct inode *dir, struct dentry *dentry) -{ - int error = may_create(dir, dentry); - if (error) - return error; - - if (!dir->i_op->mknod) - return -EPERM; - - return dir->i_op->mknod(dir, dentry, - S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); -} -EXPORT_SYMBOL(vfs_whiteout); - int readlink_copy(char __user *buffer, int buflen, const char *link) { int len = PTR_ERR(link); diff --git a/fs/namespace.c b/fs/namespace.c index a28e4db075ed..6d499ab254b7 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -648,6 +648,21 @@ struct vfsmount *lookup_mnt(const struct path *path) return m; } +static inline void lock_ns_list(struct mnt_namespace *ns) +{ + spin_lock(&ns->ns_lock); +} + +static inline void unlock_ns_list(struct mnt_namespace *ns) +{ + spin_unlock(&ns->ns_lock); +} + +static inline bool mnt_is_cursor(struct mount *mnt) +{ + return mnt->mnt.mnt_flags & MNT_CURSOR; +} + /* * __is_local_mountpoint - Test to see if dentry is a mountpoint in the * current mount namespace. 
@@ -673,11 +688,15 @@ bool __is_local_mountpoint(struct dentry *dentry) goto out; down_read(&namespace_sem); + lock_ns_list(ns); list_for_each_entry(mnt, &ns->list, mnt_list) { + if (mnt_is_cursor(mnt)) + continue; is_covered = (mnt->mnt_mountpoint == dentry); if (is_covered) break; } + unlock_ns_list(ns); up_read(&namespace_sem); out: return is_covered; @@ -1245,46 +1264,71 @@ struct vfsmount *mnt_clone_internal(const struct path *path) } #ifdef CONFIG_PROC_FS +static struct mount *mnt_list_next(struct mnt_namespace *ns, + struct list_head *p) +{ + struct mount *mnt, *ret = NULL; + + lock_ns_list(ns); + list_for_each_continue(p, &ns->list) { + mnt = list_entry(p, typeof(*mnt), mnt_list); + if (!mnt_is_cursor(mnt)) { + ret = mnt; + break; + } + } + unlock_ns_list(ns); + + return ret; +} + /* iterator; we want it to have access to namespace_sem, thus here... */ static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = m->private; + struct list_head *prev; down_read(&namespace_sem); - if (p->cached_event == p->ns->event) { - void *v = p->cached_mount; - if (*pos == p->cached_index) - return v; - if (*pos == p->cached_index + 1) { - v = seq_list_next(v, &p->ns->list, &p->cached_index); - return p->cached_mount = v; - } + if (!*pos) { + prev = &p->ns->list; + } else { + prev = &p->cursor.mnt_list; + + /* Read after we'd reached the end? */ + if (list_empty(prev)) + return NULL; } - p->cached_event = p->ns->event; - p->cached_mount = seq_list_start(&p->ns->list, *pos); - p->cached_index = *pos; - return p->cached_mount; + return mnt_list_next(p->ns, prev); } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_mounts *p = m->private; + struct mount *mnt = v; - p->cached_mount = seq_list_next(v, &p->ns->list, pos); - p->cached_index = *pos; - return p->cached_mount; + ++*pos; + return mnt_list_next(p->ns, &mnt->mnt_list); } static void m_stop(struct seq_file *m, void *v) { + struct proc_mounts *p = m->private; + struct mount *mnt = v; + + lock_ns_list(p->ns); + if (mnt) + list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list); + else + list_del_init(&p->cursor.mnt_list); + unlock_ns_list(p->ns); up_read(&namespace_sem); } static int m_show(struct seq_file *m, void *v) { struct proc_mounts *p = m->private; - struct mount *r = list_entry(v, struct mount, mnt_list); + struct mount *r = v; return p->show(m, &r->mnt); } @@ -1294,6 +1338,15 @@ const struct seq_operations mounts_op = { .stop = m_stop, .show = m_show, }; + +void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor) +{ + down_read(&namespace_sem); + lock_ns_list(ns); + list_del(&cursor->mnt_list); + unlock_ns_list(ns); + up_read(&namespace_sem); +} #endif /* CONFIG_PROC_FS */ /** @@ -1733,6 +1786,11 @@ static struct mnt_namespace *to_mnt_ns(struct ns_common *ns) return container_of(ns, struct mnt_namespace, ns); } +struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) +{ + return &mnt->ns; +} + static bool mnt_ns_loop(struct dentry *dentry) { /* Could bind mounting the mount namespace inode cause a @@ -3202,6 +3260,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a atomic_set(&new_ns->count, 1); INIT_LIST_HEAD(&new_ns->list); init_waitqueue_head(&new_ns->poll); + spin_lock_init(&new_ns->ns_lock); new_ns->user_ns = get_user_ns(user_ns); new_ns->ucounts = ucounts; return new_ns; @@ -3595,7 +3654,7 @@ EXPORT_SYMBOL(path_is_under); * file system may be mounted on put_old. After all, new_root is a mountpoint. 
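/*
 * Editor's illustration, not part of this patch: with /proc readers now
 * parking "cursor" entries in ns->list, any walker that only holds
 * namespace_sem for read must also take ns_lock and skip cursors, as the
 * new mount.h comment requires. "for_each_real_mount" is a hypothetical
 * helper written for this note; it mirrors the pattern that
 * __is_local_mountpoint() follows above.
 */
static void for_each_real_mount(struct mnt_namespace *ns,
				void (*fn)(struct mount *mnt))
{
	struct mount *mnt;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))		/* /proc reader place-holder */
			continue;
		fn(mnt);
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);
}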
* * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. - * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives + * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives * in this situation. * * Notes: @@ -3842,10 +3901,14 @@ static bool mnt_already_visible(struct mnt_namespace *ns, bool visible = false; down_read(&namespace_sem); + lock_ns_list(ns); list_for_each_entry(mnt, &ns->list, mnt_list) { struct mount *child; int mnt_flags; + if (mnt_is_cursor(mnt)) + continue; + if (mnt->mnt.mnt_sb->s_type != sb->s_type) continue; @@ -3893,6 +3956,7 @@ static bool mnt_already_visible(struct mnt_namespace *ns, next: ; } found: + unlock_ns_list(ns); up_read(&namespace_sem); return visible; } @@ -3954,16 +4018,18 @@ static void mntns_put(struct ns_common *ns) put_mnt_ns(to_mnt_ns(ns)); } -static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns) +static int mntns_install(struct nsset *nsset, struct ns_common *ns) { - struct fs_struct *fs = current->fs; + struct nsproxy *nsproxy = nsset->nsproxy; + struct fs_struct *fs = nsset->fs; struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns; + struct user_namespace *user_ns = nsset->cred->user_ns; struct path root; int err; if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || - !ns_capable(current_user_ns(), CAP_SYS_CHROOT) || - !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) + !ns_capable(user_ns, CAP_SYS_CHROOT) || + !ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; if (is_anon_ns(mnt_ns)) diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c index 7a57ff2528af..8f7cff7a4293 100644 --- a/fs/nfs/blocklayout/extent_tree.c +++ b/fs/nfs/blocklayout/extent_tree.c @@ -582,7 +582,7 @@ retry: if (!arg->layoutupdate_pages) return -ENOMEM; - start_p = __vmalloc(buffer_size, GFP_NOFS, PAGE_KERNEL); + start_p = __vmalloc(buffer_size, GFP_NOFS); if (!start_p) { kfree(arg->layoutupdate_pages); return -ENOMEM; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 1f32a9fbfdaf..6673a77884d9 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -668,7 +668,8 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize) } /* - * Record the page as unstable and mark its inode as dirty. + * Record the page as unstable (an extra writeback period) and mark its + * inode as dirty. */ static inline void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo) @@ -676,8 +677,11 @@ void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo) if (!cinfo->dreq) { struct inode *inode = page_file_mapping(page)->host; - inc_node_page_state(page, NR_UNSTABLE_NFS); - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE); + /* This page is really still in write-back - just that the + * writeback is happening on the server now. 
+ */ + inc_node_page_state(page, NR_WRITEBACK); + inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 1e767f779c49..639c34fec04a 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -946,9 +946,9 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, static void nfs_clear_page_commit(struct page *page) { - dec_node_page_state(page, NR_UNSTABLE_NFS); + dec_node_page_state(page, NR_WRITEBACK); dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb, - WB_RECLAIMABLE); + WB_WRITEBACK); } /* Called holding the request lock on @req */ diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index a8fb18609146..9e40dfecf1b1 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -127,16 +127,8 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname) goto out; } - { - SHASH_DESC_ON_STACK(desc, tfm); - - desc->tfm = tfm; - - status = crypto_shash_digest(desc, clname->data, clname->len, - cksum.data); - shash_desc_zero(desc); - } - + status = crypto_shash_tfm_digest(tfm, clname->data, clname->len, + cksum.data); if (status) goto out; @@ -1148,7 +1140,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp) struct crypto_shash *tfm = cn->cn_tfm; struct xdr_netobj cksum; char *principal = NULL; - SHASH_DESC_ON_STACK(desc, tfm); /* Don't upcall if it's already stored */ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) @@ -1170,16 +1161,14 @@ nfsd4_cld_create_v2(struct nfs4_client *clp) else if (clp->cl_cred.cr_principal) principal = clp->cl_cred.cr_principal; if (principal) { - desc->tfm = tfm; cksum.len = crypto_shash_digestsize(tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) { ret = -ENOMEM; goto out; } - ret = crypto_shash_digest(desc, principal, strlen(principal), - cksum.data); - shash_desc_zero(desc); + ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal), + cksum.data); if (ret) { kfree(cksum.data); goto out; @@ -1343,7 +1332,6 @@ nfsd4_cld_check_v2(struct nfs4_client *clp) struct crypto_shash *tfm = cn->cn_tfm; struct xdr_netobj cksum; char *principal = NULL; - SHASH_DESC_ON_STACK(desc, tfm); /* did we already find that this client is stable? */ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) @@ -1381,14 +1369,12 @@ found: principal = clp->cl_cred.cr_principal; if (principal == NULL) return -ENOENT; - desc->tfm = tfm; cksum.len = crypto_shash_digestsize(tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) return -ENOENT; - status = crypto_shash_digest(desc, principal, strlen(principal), - cksum.data); - shash_desc_zero(desc); + status = crypto_shash_tfm_digest(tfm, principal, + strlen(principal), cksum.data); if (status) { kfree(cksum.data); return -ENOENT; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 0aa02eb18bd3..c3fbab1753ec 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -979,12 +979,13 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, if (test_bit(RQ_LOCAL, &rqstp->rq_flags)) /* - * We want less throttling in balance_dirty_pages() - * and shrink_inactive_list() so that nfs to + * We want throttling in balance_dirty_pages() + * and shrink_inactive_list() to only consider + * the backingdev we are writing to, so that nfs to * localhost doesn't cause nfsd to lock up due to all * the client's dirty pages or its congested queue. 
*/ - current->flags |= PF_LESS_THROTTLE; + current->flags |= PF_LOCAL_THROTTLE; exp = fhp->fh_export; use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp); @@ -1037,7 +1038,7 @@ out_nfserr: nfserr = nfserrno(host_err); } if (test_bit(RQ_LOCAL, &rqstp->rq_flags)) - current_restore_flags(pflags, PF_LESS_THROTTLE); + current_restore_flags(pflags, PF_LOCAL_THROTTLE); return nfserr; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 671085512e0f..28009ec54420 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -14,6 +14,7 @@ #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/uio.h> +#include <linux/fiemap.h> #include "nilfs.h" #include "btnode.h" #include "segment.h" @@ -145,18 +146,9 @@ static int nilfs_readpage(struct file *file, struct page *page) return mpage_readpage(page, nilfs_get_block); } -/** - * nilfs_readpages() - implement readpages() method of nilfs_aops {} - * address_space_operations. - * @file - file struct of the file to be read - * @mapping - address_space struct used for reading multiple pages - * @pages - the pages to be read - * @nr_pages - number of pages to be read - */ -static int nilfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned int nr_pages) +static void nilfs_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); + mpage_readahead(rac, nilfs_get_block); } static int nilfs_writepages(struct address_space *mapping, @@ -308,7 +300,7 @@ const struct address_space_operations nilfs_aops = { .readpage = nilfs_readpage, .writepages = nilfs_writepages, .set_page_dirty = nilfs_set_page_dirty, - .readpages = nilfs_readpages, + .readahead = nilfs_readahead, .write_begin = nilfs_write_begin, .write_end = nilfs_write_end, /* .releasepage = nilfs_releasepage, */ @@ -1005,7 +997,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, unsigned int blkbits = inode->i_blkbits; int ret, n; - ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); + ret = fiemap_prep(inode, fieinfo, start, &len, 0); if (ret) return ret; diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 380a543c5b19..b55cdeb4d169 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -375,7 +375,7 @@ static inline int nilfs_flush_device(struct the_nilfs *nilfs) */ smp_wmb(); - err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL); + err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL); if (err != -EIO) err = 0; return err; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index c18459cea6f4..85eda539b35f 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -70,7 +70,7 @@ static bool fanotify_name_event_equal(struct fanotify_name_event *fne1, return !memcmp(fne1->name, fne2->name, fne1->name_len); } -static bool should_merge(struct fsnotify_event *old_fsn, +static bool fanotify_should_merge(struct fsnotify_event *old_fsn, struct fsnotify_event *new_fsn) { struct fanotify_event *old, *new; @@ -129,7 +129,7 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) return 0; list_for_each_entry_reverse(test_event, list, list) { - if (should_merge(test_event, event)) { + if (fanotify_should_merge(test_event, event)) { FANOTIFY_E(test_event)->mask |= new->mask; return 1; } @@ -232,6 +232,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, if (!fsnotify_iter_should_report_type(iter_info, type)) continue; mark = iter_info->marks[type]; + 
+ /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */ + marks_ignored_mask |= mark->ignored_mask; + /* * If the event is on dir and this mark doesn't care about * events on dir, don't send it! @@ -249,7 +253,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, continue; marks_mask |= mark->mask; - marks_ignored_mask |= mark->ignored_mask; } test_mask = event_mask & marks_mask & ~marks_ignored_mask; diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h index 35bfbf4a7aac..8ce7ccfc4b0d 100644 --- a/fs/notify/fanotify/fanotify.h +++ b/fs/notify/fanotify/fanotify.h @@ -89,7 +89,7 @@ struct fanotify_name_event { __kernel_fsid_t fsid; struct fanotify_fh dir_fh; u8 name_len; - char name[0]; + char name[]; }; static inline struct fanotify_name_event * diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 42cb794c62ac..63b5dffdca9e 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -328,7 +328,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ret = -EFAULT; /* * Sanity check copy size in case get_one_event() and - * fill_event_metadata() event_len sizes ever get out of sync. + * event_len sizes ever get out of sync. */ if (WARN_ON_ONCE(metadata.event_len > count)) goto out_close_fd; @@ -487,8 +487,10 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t group = file->private_data; - if (count > sizeof(response)) - count = sizeof(response); + if (count < sizeof(response)) + return -EINVAL; + + count = sizeof(response); pr_debug("%s: group=%p count=%zu\n", __func__, group, count); diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index ef83f4020554..f0d6b54be412 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c @@ -11,7 +11,6 @@ #include <linux/sched.h> #include <linux/types.h> #include <linux/seq_file.h> -#include <linux/proc_fs.h> #include <linux/exportfs.h> #include "inotify/inotify.h" diff --git a/fs/notify/group.c b/fs/notify/group.c index 133f723aca07..a4a4b1c64d32 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -25,6 +25,7 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group) group->ops->free_group_priv(group); mem_cgroup_put(group->memcg); + mutex_destroy(&group->mark_mutex); kfree(group); } diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig index 6736e47d94d8..7715fadd5fff 100644 --- a/fs/notify/inotify/Kconfig +++ b/fs/notify/inotify/Kconfig @@ -12,6 +12,6 @@ config INOTIFY_USER new features including multiple file events, one-shot support, and unmount notification. - For more information, see <file:Documentation/filesystems/inotify.txt> + For more information, see <file:Documentation/filesystems/inotify.rst> If unsure, say Y. 
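One behavioural change worth calling out from the fanotify_user.c hunk above: fanotify_write() now rejects a response shorter than sizeof(struct fanotify_response) with -EINVAL instead of quietly processing a truncated one (anything longer is still clamped to the struct size). A hedged userspace sketch of the write a permission-event listener is expected to issue (fanotify_fd and md are assumed to come from the usual fanotify_init()/read() sequence):

	#include <sys/fanotify.h>
	#include <unistd.h>

	static int answer_permission_event(int fanotify_fd,
					   const struct fanotify_event_metadata *md,
					   int allow)
	{
		struct fanotify_response resp = {
			.fd = md->fd,
			.response = allow ? FAN_ALLOW : FAN_DENY,
		};

		/* Write the whole struct in one go; a short write is
		 * now answered with EINVAL. */
		if (write(fanotify_fd, &resp, sizeof(resp)) != sizeof(resp))
			return -1;
		return 0;
	}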
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 81ffc8629fc4..f88bbcc9efeb 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -764,20 +764,18 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd) struct fsnotify_group *group; struct inotify_inode_mark *i_mark; struct fd f; - int ret = 0; + int ret = -EINVAL; f = fdget(fd); if (unlikely(!f.file)) return -EBADF; /* verify that this is indeed an inotify instance */ - ret = -EINVAL; if (unlikely(f.file->f_op != &inotify_fops)) goto out; group = f.file->private_data; - ret = -EINVAL; i_mark = inotify_idr_find(group, wd); if (unlikely(!i_mark)) goto out; diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 1d96216dffd1..8387937b9d01 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -325,13 +325,16 @@ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark) } bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) + __releases(&fsnotify_mark_srcu) { int type; fsnotify_foreach_obj_type(type) { /* This can fail if mark is being removed */ - if (!fsnotify_get_mark_safe(iter_info->marks[type])) + if (!fsnotify_get_mark_safe(iter_info->marks[type])) { + __release(&fsnotify_mark_srcu); goto fail; + } } /* @@ -350,6 +353,7 @@ fail: } void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info) + __acquires(&fsnotify_mark_srcu) { int type; diff --git a/fs/nsfs.c b/fs/nsfs.c index 4f1205725cfe..800c1d0eb0d0 100644 --- a/fs/nsfs.c +++ b/fs/nsfs.c @@ -229,6 +229,11 @@ int ns_get_name(char *buf, size_t size, struct task_struct *task, return res; } +bool proc_ns_file(const struct file *file) +{ + return file->f_op == &ns_file_operations; +} + struct file *proc_ns_fget(int fd) { struct file *file; diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig index de9fb5cff226..1667a7e590d8 100644 --- a/fs/ntfs/Kconfig +++ b/fs/ntfs/Kconfig @@ -18,7 +18,7 @@ config NTFS_FS the Linux 2.4 kernel series is separately available as a patch from the project web site. - For more information see <file:Documentation/filesystems/ntfs.txt> + For more information see <file:Documentation/filesystems/ntfs.rst> and <http://www.linux-ntfs.org/>. 
To compile this file system support as a module, choose M here: the diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 554b744f41bf..bb0a43860ad2 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -1732,7 +1732,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { bh = bh->b_this_page; } while (bh); tail->b_this_page = head; - attach_page_buffers(page, head); + attach_page_private(page, head); } else buffers_to_free = bh; } diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h index 842b0bfc3ac9..7068425735f1 100644 --- a/fs/ntfs/malloc.h +++ b/fs/ntfs/malloc.h @@ -34,7 +34,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) /* return (void *)__get_free_page(gfp_mask); */ } if (likely((size >> PAGE_SHIFT) < totalram_pages())) - return __vmalloc(size, gfp_mask, PAGE_KERNEL); + return __vmalloc(size, gfp_mask); return NULL; } diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 3aac5c917afe..fbb9f1bc623d 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -504,7 +504,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, bh = bh->b_this_page; } while (bh); tail->b_this_page = head; - attach_page_buffers(page, head); + attach_page_private(page, head); } bh = head = page_buffers(page); BUG_ON(!bh); diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig index 46bba20da6b5..1177c33df895 100644 --- a/fs/ocfs2/Kconfig +++ b/fs/ocfs2/Kconfig @@ -21,7 +21,7 @@ config OCFS2_FS OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/ For more information on OCFS2, see the file - <file:Documentation/filesystems/ocfs2.txt>. + <file:Documentation/filesystems/ocfs2.rst>. config OCFS2_FS_O2CB tristate "O2CB Kernelspace Clustering" diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3a67a6518ddf..3bfb4147895a 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -350,14 +350,11 @@ out: * grow out to a tree. If need be, detecting boundary extents could * trivially be added in a future version of ocfs2_get_block(). */ -static int ocfs2_readpages(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void ocfs2_readahead(struct readahead_control *rac) { - int ret, err = -EIO; - struct inode *inode = mapping->host; + int ret; + struct inode *inode = rac->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); - loff_t start; - struct page *last; /* * Use the nonblocking flag for the dlm code to avoid page @@ -365,36 +362,31 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping, */ ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK); if (ret) - return err; + return; - if (down_read_trylock(&oi->ip_alloc_sem) == 0) { - ocfs2_inode_unlock(inode, 0); - return err; - } + if (down_read_trylock(&oi->ip_alloc_sem) == 0) + goto out_unlock; /* * Don't bother with inline-data. There isn't anything * to read-ahead in that case anyway... */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) - goto out_unlock; + goto out_up; /* * Check whether a remote node truncated this file - we just * drop out in that case as it's not worth handling here. 
*/ - last = lru_to_page(pages); - start = (loff_t)last->index << PAGE_SHIFT; - if (start >= i_size_read(inode)) - goto out_unlock; + if (readahead_pos(rac) >= i_size_read(inode)) + goto out_up; - err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); + mpage_readahead(rac, ocfs2_get_block); -out_unlock: +out_up: up_read(&oi->ip_alloc_sem); +out_unlock: ocfs2_inode_unlock(inode, 0); - - return err; } /* Note: Because we don't support holes, our allocation has @@ -2474,7 +2466,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, - .readpages = ocfs2_readpages, + .readahead = ocfs2_readahead, .writepage = ocfs2_writepage, .write_begin = ocfs2_write_begin, .write_end = ocfs2_write_end, diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2c512b40a940..79a231719460 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -1441,22 +1441,6 @@ static void o2net_rx_until_empty(struct work_struct *work) sc_put(sc); } -static int o2net_set_nodelay(struct socket *sock) -{ - int val = 1; - - return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (void *)&val, sizeof(val)); -} - -static int o2net_set_usertimeout(struct socket *sock) -{ - int user_timeout = O2NET_TCP_USER_TIMEOUT; - - return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, - (void *)&user_timeout, sizeof(user_timeout)); -} - static void o2net_initialize_handshake(void) { o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32( @@ -1636,17 +1620,8 @@ static void o2net_start_connect(struct work_struct *work) goto out; } - ret = o2net_set_nodelay(sc->sc_sock); - if (ret) { - mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); - goto out; - } - - ret = o2net_set_usertimeout(sock); - if (ret) { - mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret); - goto out; - } + tcp_sock_set_nodelay(sc->sc_sock->sk); + tcp_sock_set_user_timeout(sock->sk, O2NET_TCP_USER_TIMEOUT); o2net_register_callbacks(sc->sc_sock->sk, sc); @@ -1832,17 +1807,8 @@ static int o2net_accept_one(struct socket *sock, int *more) *more = 1; new_sock->sk->sk_allocation = GFP_ATOMIC; - ret = o2net_set_nodelay(new_sock); - if (ret) { - mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); - goto out; - } - - ret = o2net_set_usertimeout(new_sock); - if (ret) { - mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret); - goto out; - } + tcp_sock_set_nodelay(new_sock->sk); + tcp_sock_set_user_timeout(new_sock->sk, O2NET_TCP_USER_TIMEOUT); ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin, 1); if (ret < 0) diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 55a6512e9fde..f105746063ed 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2760,6 +2760,7 @@ leave: * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) + __must_hold(&dlm->spinlock) { int ret; int lock_dropped = 0; diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 1de77f1a600b..ea868c6f9800 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -227,7 +227,7 @@ static ssize_t dlmfs_file_read(struct file *filp, loff_t *ppos) { int bytes_left; - ssize_t readlen, got; + ssize_t got; char *lvb_buf; struct inode *inode = file_inode(filp); @@ -237,36 +237,31 @@ static ssize_t dlmfs_file_read(struct file *filp, if (*ppos >= i_size_read(inode)) return 0; + /* don't read past the lvb */ + if 
(count > i_size_read(inode) - *ppos) + count = i_size_read(inode) - *ppos; + if (!count) return 0; - if (!access_ok(buf, count)) - return -EFAULT; - - /* don't read past the lvb */ - if ((count + *ppos) > i_size_read(inode)) - readlen = i_size_read(inode) - *ppos; - else - readlen = count; - - lvb_buf = kmalloc(readlen, GFP_NOFS); + lvb_buf = kmalloc(count, GFP_NOFS); if (!lvb_buf) return -ENOMEM; - got = user_dlm_read_lvb(inode, lvb_buf, readlen); + got = user_dlm_read_lvb(inode, lvb_buf, count); if (got) { - BUG_ON(got != readlen); - bytes_left = __copy_to_user(buf, lvb_buf, readlen); - readlen -= bytes_left; + BUG_ON(got != count); + bytes_left = copy_to_user(buf, lvb_buf, count); + count -= bytes_left; } else - readlen = 0; + count = 0; kfree(lvb_buf); - *ppos = *ppos + readlen; + *ppos = *ppos + count; - mlog(0, "read %zd bytes\n", readlen); - return readlen; + mlog(0, "read %zu bytes\n", count); + return count; } static ssize_t dlmfs_file_write(struct file *filp, @@ -291,9 +286,6 @@ static ssize_t dlmfs_file_write(struct file *filp, if (!count) return 0; - if (!access_ok(buf, count)) - return -EFAULT; - lvb_buf = kmalloc(count, GFP_NOFS); if (!lvb_buf) return -ENOMEM; diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index e3e2d1b2af51..a94852af5510 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -733,8 +733,6 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, return 0; } -#define OCFS2_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) - int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 map_start, u64 map_len) { @@ -746,7 +744,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, struct buffer_head *di_bh = NULL; struct ocfs2_extent_rec rec; - ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS); + ret = fiemap_prep(inode, fieinfo, map_start, &map_len, 0); if (ret) return ret; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 6cd5e4924e4d..85979e2214b3 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -194,7 +194,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, needs_barrier = true; err = jbd2_complete_transaction(journal, commit_tid); if (needs_barrier) { - ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); if (!err) err = ret; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 9150cfa4df7d..ee5d98516212 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -279,6 +279,7 @@ enum ocfs2_mount_options OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */ OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */ OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */ + OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */ }; #define OCFS2_OSB_SOFT_RO 0x0001 @@ -673,7 +674,8 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb) static inline int ocfs2_mount_local(struct ocfs2_super *osb) { - return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT); + return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT) + || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER)); } static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 8caeceeaeda7..4da0e4b1e79b 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -254,14 +254,16 @@ static int __ocfs2_find_empty_slot(struct 
ocfs2_slot_info *si, int i, ret = -ENOSPC; if ((preferred >= 0) && (preferred < si->si_num_slots)) { - if (!si->si_slots[preferred].sl_valid) { + if (!si->si_slots[preferred].sl_valid || + !si->si_slots[preferred].sl_node_num) { ret = preferred; goto out; } } for(i = 0; i < si->si_num_slots; i++) { - if (!si->si_slots[i].sl_valid) { + if (!si->si_slots[i].sl_valid || + !si->si_slots[i].sl_node_num) { ret = i; break; } @@ -456,24 +458,30 @@ int ocfs2_find_slot(struct ocfs2_super *osb) spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); - /* search for ourselves first and take the slot if it already - * exists. Perhaps we need to mark this in a variable for our - * own journal recovery? Possibly not, though we certainly - * need to warn to the user */ - slot = __ocfs2_node_num_to_slot(si, osb->node_num); - if (slot < 0) { - /* if no slot yet, then just take 1st available - * one. */ - slot = __ocfs2_find_empty_slot(si, osb->preferred_slot); + if (ocfs2_mount_local(osb)) + /* use slot 0 directly in local mode */ + slot = 0; + else { + /* search for ourselves first and take the slot if it already + * exists. Perhaps we need to mark this in a variable for our + * own journal recovery? Possibly not, though we certainly + * need to warn to the user */ + slot = __ocfs2_node_num_to_slot(si, osb->node_num); if (slot < 0) { - spin_unlock(&osb->osb_lock); - mlog(ML_ERROR, "no free slots available!\n"); - status = -EINVAL; - goto bail; - } - } else - printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already " - "allocated to this node!\n", slot, osb->dev_str); + /* if no slot yet, then just take 1st available + * one. */ + slot = __ocfs2_find_empty_slot(si, osb->preferred_slot); + if (slot < 0) { + spin_unlock(&osb->osb_lock); + mlog(ML_ERROR, "no free slots available!\n"); + status = -EINVAL; + goto bail; + } + } else + printk(KERN_INFO "ocfs2: Slot %d on device (%s) was " + "already allocated to this node!\n", + slot, osb->dev_str); + } ocfs2_set_slot(si, slot, osb->node_num); osb->slot_num = slot; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index ac61eeaf3837..71ea9ce71a6b 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -175,6 +175,7 @@ enum { Opt_dir_resv_level, Opt_journal_async_commit, Opt_err_cont, + Opt_nocluster, Opt_err, }; @@ -208,6 +209,7 @@ static const match_table_t tokens = { {Opt_dir_resv_level, "dir_resv_level=%u"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_err_cont, "errors=continue"}, + {Opt_nocluster, "nocluster"}, {Opt_err, NULL} }; @@ -619,6 +621,13 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) goto out; } + tmp = OCFS2_MOUNT_NOCLUSTER; + if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { + ret = -EINVAL; + mlog(ML_ERROR, "Cannot change nocluster option on remount\n"); + goto out; + } + tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE; if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { @@ -859,6 +868,7 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb, } if (ocfs2_userspace_stack(osb) && + !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) && strncmp(osb->osb_cluster_stack, mopt->cluster_stack, OCFS2_STACK_LABEL_LEN)) { mlog(ML_ERROR, @@ -1139,6 +1149,11 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? 
"writeback" : "ordered"); + if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) && + !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)) + printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted " + "without cluster aware mode.\n", osb->dev_str); + atomic_set(&osb->vol_state, VOLUME_MOUNTED); wake_up(&osb->osb_mount_event); @@ -1445,6 +1460,9 @@ static int ocfs2_parse_options(struct super_block *sb, case Opt_journal_async_commit: mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; break; + case Opt_nocluster: + mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER; + break; default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " @@ -1556,6 +1574,9 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root) if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT) seq_printf(s, ",journal_async_commit"); + if (opts & OCFS2_MOUNT_NOCLUSTER) + seq_printf(s, ",nocluster"); + return 0; } diff --git a/fs/omfs/file.c b/fs/omfs/file.c index d640b9388238..d7b5f09d298c 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -289,10 +289,9 @@ static int omfs_readpage(struct file *file, struct page *page) return block_read_full_page(page, omfs_get_block); } -static int omfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void omfs_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); + mpage_readahead(rac, omfs_get_block); } static int omfs_writepage(struct page *page, struct writeback_control *wbc) @@ -373,7 +372,7 @@ const struct inode_operations omfs_file_inops = { const struct address_space_operations omfs_aops = { .readpage = omfs_readpage, - .readpages = omfs_readpages, + .readahead = omfs_readahead, .writepage = omfs_writepage, .writepages = omfs_writepages, .write_begin = omfs_write_begin, diff --git a/fs/open.c b/fs/open.c index 719b320ede52..6cd48a61cda3 100644 --- a/fs/open.c +++ b/fs/open.c @@ -345,21 +345,14 @@ SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len) * We do this by temporarily clearing all FS-related capabilities and * switching the fsuid/fsgid around to the real ones. */ -long do_faccessat(int dfd, const char __user *filename, int mode) +static const struct cred *access_override_creds(void) { const struct cred *old_cred; struct cred *override_cred; - struct path path; - struct inode *inode; - int res; - unsigned int lookup_flags = LOOKUP_FOLLOW; - - if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ - return -EINVAL; override_cred = prepare_creds(); if (!override_cred) - return -ENOMEM; + return NULL; override_cred->fsuid = override_cred->uid; override_cred->fsgid = override_cred->gid; @@ -394,6 +387,38 @@ long do_faccessat(int dfd, const char __user *filename, int mode) override_cred->non_rcu = 1; old_cred = override_creds(override_cred); + + /* override_cred() gets its own ref */ + put_cred(override_cred); + + return old_cred; +} + +long do_faccessat(int dfd, const char __user *filename, int mode, int flags) +{ + struct path path; + struct inode *inode; + int res; + unsigned int lookup_flags = LOOKUP_FOLLOW; + const struct cred *old_cred = NULL; + + if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? 
*/ + return -EINVAL; + + if (flags & ~(AT_EACCESS | AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) + return -EINVAL; + + if (flags & AT_SYMLINK_NOFOLLOW) + lookup_flags &= ~LOOKUP_FOLLOW; + if (flags & AT_EMPTY_PATH) + lookup_flags |= LOOKUP_EMPTY; + + if (!(flags & AT_EACCESS)) { + old_cred = access_override_creds(); + if (!old_cred) + return -ENOMEM; + } + retry: res = user_path_at(dfd, filename, lookup_flags, &path); if (res) @@ -435,19 +460,26 @@ out_path_release: goto retry; } out: - revert_creds(old_cred); - put_cred(override_cred); + if (old_cred) + revert_creds(old_cred); + return res; } SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) { - return do_faccessat(dfd, filename, mode); + return do_faccessat(dfd, filename, mode, 0); +} + +SYSCALL_DEFINE4(faccessat2, int, dfd, const char __user *, filename, int, mode, + int, flags) +{ + return do_faccessat(dfd, filename, mode, flags); } SYSCALL_DEFINE2(access, const char __user *, filename, int, mode) { - return do_faccessat(AT_FDCWD, filename, mode); + return do_faccessat(AT_FDCWD, filename, mode, 0); } int ksys_chdir(const char __user *filename) @@ -743,9 +775,8 @@ static int do_dentry_open(struct file *f, path_get(&f->f_path); f->f_inode = inode; f->f_mapping = inode->i_mapping; - - /* Ensure that we skip any errors that predate opening of the file */ f->f_wb_err = filemap_sample_wb_err(f->f_mapping); + f->f_sb_err = file_sample_sb_err(f); if (unlikely(f->f_flags & O_PATH)) { f->f_mode = FMODE_PATH | FMODE_OPENED; diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 12ae630fbed7..48f0547d4850 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -62,12 +62,7 @@ static int orangefs_writepage_locked(struct page *page, } else { ret = 0; } - if (wr) { - kfree(wr); - set_page_private(page, 0); - ClearPagePrivate(page); - put_page(page); - } + kfree(detach_page_private(page)); return ret; } @@ -409,9 +404,7 @@ static int orangefs_write_begin(struct file *file, wr->len = len; wr->uid = current_fsuid(); wr->gid = current_fsgid(); - SetPagePrivate(page); - set_page_private(page, (unsigned long)wr); - get_page(page); + attach_page_private(page, wr); okay: return 0; } @@ -459,18 +452,12 @@ static void orangefs_invalidatepage(struct page *page, wr = (struct orangefs_write_range *)page_private(page); if (offset == 0 && length == PAGE_SIZE) { - kfree((struct orangefs_write_range *)page_private(page)); - set_page_private(page, 0); - ClearPagePrivate(page); - put_page(page); + kfree(detach_page_private(page)); return; /* write range entirely within invalidate range (or equal) */ } else if (page_offset(page) + offset <= wr->pos && wr->pos + wr->len <= page_offset(page) + offset + length) { - kfree((struct orangefs_write_range *)page_private(page)); - set_page_private(page, 0); - ClearPagePrivate(page); - put_page(page); + kfree(detach_page_private(page)); /* XXX is this right? 
only caller in fs */ cancel_dirty_page(page); return; @@ -535,12 +522,7 @@ static int orangefs_releasepage(struct page *page, gfp_t foo) static void orangefs_freepage(struct page *page) { - if (PagePrivate(page)) { - kfree((struct orangefs_write_range *)page_private(page)); - set_page_private(page, 0); - ClearPagePrivate(page); - put_page(page); - } + kfree(detach_page_private(page)); } static int orangefs_launder_page(struct page *page) @@ -740,9 +722,7 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf) wr->len = PAGE_SIZE; wr->uid = current_fsuid(); wr->gid = current_fsgid(); - SetPagePrivate(page); - set_page_private(page, (unsigned long)wr); - get_page(page); + attach_page_private(page, wr); okay: file_update_time(vmf->vma->vm_file); diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index 714c14c47ca5..dd188c7996b3 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig @@ -9,7 +9,7 @@ config OVERLAY_FS 'lower' filesystem is either hidden or, in the case of directories, merged with the 'upper' object. - For more information see Documentation/filesystems/overlayfs.txt + For more information see Documentation/filesystems/overlayfs.rst config OVERLAY_FS_REDIRECT_DIR bool "Overlayfs: turn on redirect directory feature by default" @@ -38,7 +38,7 @@ config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW If backward compatibility is not an issue, then it is safe and recommended to say N here. - For more information, see Documentation/filesystems/overlayfs.txt + For more information, see Documentation/filesystems/overlayfs.rst If unsure, say Y. @@ -103,7 +103,7 @@ config OVERLAY_FS_XINO_AUTO If compatibility with applications that expect 32bit inodes is not an issue, then it is safe and recommended to say Y here. - For more information, see Documentation/filesystems/overlayfs.txt + For more information, see Documentation/filesystems/overlayfs.rst If unsure, say N. 
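The orangefs hunks above (like the ntfs ones earlier in this diff) drop the open-coded get_page()/set_page_private()/SetPagePrivate() dance in favour of attach_page_private()/detach_page_private(). A rough sketch of the semantics these helpers are expected to provide — not copied from this patch, and the in-tree definitions may differ in detail:

	static inline void attach_page_private(struct page *page, void *data)
	{
		get_page(page);			/* the private data pins the page */
		set_page_private(page, (unsigned long)data);
		SetPagePrivate(page);
	}

	static inline void *detach_page_private(struct page *page)
	{
		void *data = (void *)page_private(page);

		if (!PagePrivate(page))
			return NULL;		/* nothing was attached */
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);			/* drop the reference taken on attach */

		return data;
	}

That is why orangefs_freepage() and friends collapse to a single kfree(detach_page_private(page)): detach returns the attached pointer (or NULL), and kfree(NULL) is a no-op.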
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 981f11ec51bc..7af76b9004eb 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -10,6 +10,7 @@ #include <linux/xattr.h> #include <linux/posix_acl.h> #include <linux/ratelimit.h> +#include <linux/fiemap.h> #include "overlayfs.h" @@ -479,10 +480,6 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return -EOPNOTSUPP; old_cred = ovl_override_creds(inode->i_sb); - - if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) - filemap_write_and_wait(realinode->i_mapping); - err = realinode->i_op->fiemap(realinode, fieinfo, start, len); revert_creds(old_cred); diff --git a/fs/pipe.c b/fs/pipe.c index 16fb72e9abf7..c7c4fb5f345f 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -140,21 +140,20 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe, put_page(page); } -static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { struct page *page = buf->page; - if (page_count(page) == 1) { - memcg_kmem_uncharge_page(page, 0); - __SetPageLocked(page); - return 0; - } - return 1; + if (page_count(page) != 1) + return false; + memcg_kmem_uncharge_page(page, 0); + __SetPageLocked(page); + return true; } /** - * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer + * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal * @@ -165,8 +164,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, * he wishes; the typical use is insertion into a different file * page cache. */ -int generic_pipe_buf_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { struct page *page = buf->page; @@ -177,12 +176,11 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, */ if (page_count(page) == 1) { lock_page(page); - return 0; + return true; } - - return 1; + return false; } -EXPORT_SYMBOL(generic_pipe_buf_steal); +EXPORT_SYMBOL(generic_pipe_buf_try_steal); /** * generic_pipe_buf_get - get a reference to a &struct pipe_buffer @@ -201,22 +199,6 @@ bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) EXPORT_SYMBOL(generic_pipe_buf_get); /** - * generic_pipe_buf_confirm - verify contents of the pipe buffer - * @info: the pipe that the buffer belongs to - * @buf: the buffer to confirm - * - * Description: - * This function does nothing, because the generic pipe code uses - * pages that are always good when inserted into the pipe. - */ -int generic_pipe_buf_confirm(struct pipe_inode_info *info, - struct pipe_buffer *buf) -{ - return 0; -} -EXPORT_SYMBOL(generic_pipe_buf_confirm); - -/** * generic_pipe_buf_release - put a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to put a reference to @@ -231,48 +213,12 @@ void generic_pipe_buf_release(struct pipe_inode_info *pipe, } EXPORT_SYMBOL(generic_pipe_buf_release); -/* New data written to a pipe may be appended to a buffer with this type. 
*/ static const struct pipe_buf_operations anon_pipe_buf_ops = { - .confirm = generic_pipe_buf_confirm, - .release = anon_pipe_buf_release, - .steal = anon_pipe_buf_steal, - .get = generic_pipe_buf_get, -}; - -static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = { - .confirm = generic_pipe_buf_confirm, - .release = anon_pipe_buf_release, - .steal = anon_pipe_buf_steal, - .get = generic_pipe_buf_get, -}; - -static const struct pipe_buf_operations packet_pipe_buf_ops = { - .confirm = generic_pipe_buf_confirm, - .release = anon_pipe_buf_release, - .steal = anon_pipe_buf_steal, - .get = generic_pipe_buf_get, + .release = anon_pipe_buf_release, + .try_steal = anon_pipe_buf_try_steal, + .get = generic_pipe_buf_get, }; -/** - * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable - * @buf: the buffer to mark - * - * Description: - * This function ensures that no future writes will be merged into the - * given &struct pipe_buffer. This is necessary when multiple pipe buffers - * share the same backing page. - */ -void pipe_buf_mark_unmergeable(struct pipe_buffer *buf) -{ - if (buf->ops == &anon_pipe_buf_ops) - buf->ops = &anon_pipe_buf_nomerge_ops; -} - -static bool pipe_buf_can_merge(struct pipe_buffer *buf) -{ - return buf->ops == &anon_pipe_buf_ops; -} - /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */ static inline bool pipe_readable(const struct pipe_inode_info *pipe) { @@ -478,7 +424,8 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask]; int offset = buf->offset + buf->len; - if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) { + if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) && + offset + chars <= PAGE_SIZE) { ret = pipe_buf_confirm(pipe, buf); if (ret) goto out; @@ -541,11 +488,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = 0; - buf->flags = 0; - if (is_packetized(filp)) { - buf->ops = &packet_pipe_buf_ops; + if (is_packetized(filp)) buf->flags = PIPE_BUF_FLAG_PACKET; - } + else + buf->flags = PIPE_BUF_FLAG_CAN_MERGE; pipe->tmp_page = NULL; copied = copy_page_from_iter(page, 0, PAGE_SIZE, from); diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 27ef84d99f59..971a42f6357d 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -23,7 +23,7 @@ config PROC_FS /proc" or the equivalent line in /etc/fstab does the job. The /proc file system is explained in the file - <file:Documentation/filesystems/proc.txt> and on the proc(5) manpage + <file:Documentation/filesystems/proc.rst> and on the proc(5) manpage ("man 5 proc"). This option will enlarge your kernel by about 67 KB. Several @@ -95,7 +95,7 @@ config PROC_CHILDREN default n help Provides a fast way to retrieve first level children pids of a task. See - <file:Documentation/filesystems/proc.txt> for more information. + <file:Documentation/filesystems/proc.rst> for more information. Say Y if you are running any user-space software which takes benefit from this interface. For example, rkt is such a piece of software. 
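The fs/pipe.c rework above folds the three near-identical anon/nomerge/packet pipe_buf_operations tables into one, drops the no-op .confirm hook along with generic_pipe_buf_confirm(), and turns .steal into a bool-returning .try_steal. Whether new data may be appended to a buffer is now carried by PIPE_BUF_FLAG_CAN_MERGE on the buffer itself rather than inferred from which ops table it points at. A hedged sketch of what a minimal ops table built on the generic helpers looks like after this change (example_release is a made-up callback):

	static const struct pipe_buf_operations example_pipe_buf_ops = {
		/* no .confirm: the generic no-op confirm helper is gone
		 * and the hook is simply left unset */
		.release	= example_release,
		.try_steal	= generic_pipe_buf_try_steal,	/* bool, was int .steal */
		.get		= generic_pipe_buf_get,
	};

pipe_write() now tags each freshly filled buffer with either PIPE_BUF_FLAG_PACKET or PIPE_BUF_FLAG_CAN_MERGE, which is what the removed pipe_buf_mark_unmergeable()/pipe_buf_can_merge() pair used to express by swapping ops pointers.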
diff --git a/fs/proc/array.c b/fs/proc/array.c index 8e16f14bb05a..713ffac59bbb 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -248,8 +248,8 @@ void render_sigset_t(struct seq_file *m, const char *header, seq_putc(m, '\n'); } -static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, - sigset_t *catch) +static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *sigign, + sigset_t *sigcatch) { struct k_sigaction *k; int i; @@ -257,9 +257,9 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, k = p->sighand->action; for (i = 1; i <= _NSIG; ++i, ++k) { if (k->sa.sa_handler == SIG_IGN) - sigaddset(ign, i); + sigaddset(sigign, i); else if (k->sa.sa_handler != SIG_DFL) - sigaddset(catch, i); + sigaddset(sigcatch, i); } } @@ -728,7 +728,7 @@ static int children_seq_show(struct seq_file *seq, void *v) { struct inode *inode = file_inode(seq->file); - seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode))); + seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode->i_sb))); return 0; } diff --git a/fs/proc/base.c b/fs/proc/base.c index eb2255e95f62..b1d94d14ed5a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -697,13 +697,21 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr) * May current process learn task's sched/cmdline info (for hide_pid_min=1) * or euid/egid (for hide_pid_min=2)? */ -static bool has_pid_permissions(struct pid_namespace *pid, +static bool has_pid_permissions(struct proc_fs_info *fs_info, struct task_struct *task, - int hide_pid_min) + enum proc_hidepid hide_pid_min) { - if (pid->hide_pid < hide_pid_min) + /* + * If 'hidpid' mount option is set force a ptrace check, + * we indicate that we are using a filesystem syscall + * by passing PTRACE_MODE_READ_FSCREDS + */ + if (fs_info->hide_pid == HIDEPID_NOT_PTRACEABLE) + return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); + + if (fs_info->hide_pid < hide_pid_min) return true; - if (in_group_p(pid->pid_gid)) + if (in_group_p(fs_info->pid_gid)) return true; return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); } @@ -711,18 +719,18 @@ static bool has_pid_permissions(struct pid_namespace *pid, static int proc_pid_permission(struct inode *inode, int mask) { - struct pid_namespace *pid = proc_pid_ns(inode); + struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb); struct task_struct *task; bool has_perms; task = get_proc_task(inode); if (!task) return -ESRCH; - has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS); + has_perms = has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS); put_task_struct(task); if (!has_perms) { - if (pid->hide_pid == HIDEPID_INVISIBLE) { + if (fs_info->hide_pid == HIDEPID_INVISIBLE) { /* * Let's make getdents(), stat(), and open() * consistent with each other. 
If a process @@ -746,7 +754,7 @@ static const struct inode_operations proc_def_inode_operations = { static int proc_single_show(struct seq_file *m, void *v) { struct inode *inode = m->private; - struct pid_namespace *ns = proc_pid_ns(inode); + struct pid_namespace *ns = proc_pid_ns(inode->i_sb); struct pid *pid = proc_pid(inode); struct task_struct *task; int ret; @@ -1415,7 +1423,7 @@ static const struct file_operations proc_fail_nth_operations = { static int sched_show(struct seq_file *m, void *v) { struct inode *inode = m->private; - struct pid_namespace *ns = proc_pid_ns(inode); + struct pid_namespace *ns = proc_pid_ns(inode->i_sb); struct task_struct *p; p = get_proc_task(inode); @@ -1909,7 +1917,7 @@ int pid_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); - struct pid_namespace *pid = proc_pid_ns(inode); + struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb); struct task_struct *task; generic_fillattr(inode, stat); @@ -1919,7 +1927,7 @@ int pid_getattr(const struct path *path, struct kstat *stat, rcu_read_lock(); task = pid_task(proc_pid(inode), PIDTYPE_PID); if (task) { - if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) { + if (!has_pid_permissions(fs_info, task, HIDEPID_INVISIBLE)) { rcu_read_unlock(); /* * This doesn't prevent learning whether PID exists, @@ -2470,7 +2478,7 @@ static int proc_timers_open(struct inode *inode, struct file *file) return -ENOMEM; tp->pid = proc_pid(inode); - tp->ns = proc_pid_ns(inode); + tp->ns = proc_pid_ns(inode->i_sb); return 0; } @@ -3312,6 +3320,7 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags) { struct task_struct *task; unsigned tgid; + struct proc_fs_info *fs_info; struct pid_namespace *ns; struct dentry *result = ERR_PTR(-ENOENT); @@ -3319,7 +3328,8 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags) if (tgid == ~0U) goto out; - ns = dentry->d_sb->s_fs_info; + fs_info = proc_sb_info(dentry->d_sb); + ns = fs_info->pid_ns; rcu_read_lock(); task = find_task_by_pid_ns(tgid, ns); if (task) @@ -3328,7 +3338,14 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags) if (!task) goto out; + /* Limit procfs to only ptraceable tasks */ + if (fs_info->hide_pid == HIDEPID_NOT_PTRACEABLE) { + if (!has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS)) + goto out_put_task; + } + result = proc_pid_instantiate(dentry, task, NULL); +out_put_task: put_task_struct(task); out: return result; @@ -3354,20 +3371,8 @@ retry: pid = find_ge_pid(iter.tgid, ns); if (pid) { iter.tgid = pid_nr_ns(pid, ns); - iter.task = pid_task(pid, PIDTYPE_PID); - /* What we to know is if the pid we have find is the - * pid of a thread_group_leader. Testing for task - * being a thread_group_leader is the obvious thing - * todo but there is a window when it fails, due to - * the pid transfer logic in de_thread. - * - * So we perform the straight forward test of seeing - * if the pid we have found is the pid of a thread - * group leader, and don't worry if the task we have - * found doesn't happen to be a thread group leader. - * As we don't care in the case of readdir. 
- */ - if (!iter.task || !has_group_leader_pid(iter.task)) { + iter.task = pid_task(pid, PIDTYPE_TGID); + if (!iter.task) { iter.tgid += 1; goto retry; } @@ -3383,20 +3388,21 @@ retry: int proc_pid_readdir(struct file *file, struct dir_context *ctx) { struct tgid_iter iter; - struct pid_namespace *ns = proc_pid_ns(file_inode(file)); + struct proc_fs_info *fs_info = proc_sb_info(file_inode(file)->i_sb); + struct pid_namespace *ns = proc_pid_ns(file_inode(file)->i_sb); loff_t pos = ctx->pos; if (pos >= PID_MAX_LIMIT + TGID_OFFSET) return 0; if (pos == TGID_OFFSET - 2) { - struct inode *inode = d_inode(ns->proc_self); + struct inode *inode = d_inode(fs_info->proc_self); if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK)) return 0; ctx->pos = pos = pos + 1; } if (pos == TGID_OFFSET - 1) { - struct inode *inode = d_inode(ns->proc_thread_self); + struct inode *inode = d_inode(fs_info->proc_thread_self); if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK)) return 0; ctx->pos = pos = pos + 1; @@ -3410,7 +3416,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) unsigned int len; cond_resched(); - if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE)) + if (!has_pid_permissions(fs_info, iter.task, HIDEPID_INVISIBLE)) continue; len = snprintf(name, sizeof(name), "%u", iter.tgid); @@ -3610,6 +3616,7 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry struct task_struct *task; struct task_struct *leader = get_proc_task(dir); unsigned tid; + struct proc_fs_info *fs_info; struct pid_namespace *ns; struct dentry *result = ERR_PTR(-ENOENT); @@ -3620,7 +3627,8 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry if (tid == ~0U) goto out; - ns = dentry->d_sb->s_fs_info; + fs_info = proc_sb_info(dentry->d_sb); + ns = fs_info->pid_ns; rcu_read_lock(); task = find_task_by_pid_ns(tid, ns); if (task) @@ -3734,7 +3742,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) /* f_version caches the tgid value that the last readdir call couldn't * return. lseek aka telldir automagically resets f_version to 0. 
*/ - ns = proc_pid_ns(inode); + ns = proc_pid_ns(inode->i_sb); tid = (int)file->f_version; file->f_version = 0; for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 4ed6dabdf6ff..2f9fa179194d 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -269,6 +269,11 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { + struct proc_fs_info *fs_info = proc_sb_info(dir->i_sb); + + if (fs_info->pidonly == PROC_PIDONLY_ON) + return ERR_PTR(-ENOENT); + return proc_lookup_de(dir, dentry, PDE(dir)); } @@ -325,6 +330,10 @@ int proc_readdir_de(struct file *file, struct dir_context *ctx, int proc_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); + struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb); + + if (fs_info->pidonly == PROC_PIDONLY_ON) + return 1; return proc_readdir_de(file, ctx, PDE(inode)); } diff --git a/fs/proc/inode.c b/fs/proc/inode.c index fb4cace9ea41..f40c2532c057 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -24,6 +24,7 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/mount.h> +#include <linux/bug.h> #include <linux/uaccess.h> @@ -165,15 +166,28 @@ void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock deactivate_super(old_sb); } +static inline const char *hidepid2str(enum proc_hidepid v) +{ + switch (v) { + case HIDEPID_OFF: return "off"; + case HIDEPID_NO_ACCESS: return "noaccess"; + case HIDEPID_INVISIBLE: return "invisible"; + case HIDEPID_NOT_PTRACEABLE: return "ptraceable"; + } + WARN_ONCE(1, "bad hide_pid value: %d\n", v); + return "unknown"; +} + static int proc_show_options(struct seq_file *seq, struct dentry *root) { - struct super_block *sb = root->d_sb; - struct pid_namespace *pid = sb->s_fs_info; + struct proc_fs_info *fs_info = proc_sb_info(root->d_sb); - if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID)) - seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid)); - if (pid->hide_pid != HIDEPID_OFF) - seq_printf(seq, ",hidepid=%u", pid->hide_pid); + if (!gid_eq(fs_info->pid_gid, GLOBAL_ROOT_GID)) + seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, fs_info->pid_gid)); + if (fs_info->hide_pid != HIDEPID_OFF) + seq_printf(seq, ",hidepid=%s", hidepid2str(fs_info->hide_pid)); + if (fs_info->pidonly != PROC_PIDONLY_OFF) + seq_printf(seq, ",subset=pid"); return 0; } @@ -464,6 +478,7 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, static int proc_reg_open(struct inode *inode, struct file *file) { + struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb); struct proc_dir_entry *pde = PDE(inode); int rv = 0; typeof_member(struct proc_ops, proc_open) open; @@ -477,6 +492,9 @@ static int proc_reg_open(struct inode *inode, struct file *file) return rv; } + if (fs_info->pidonly == PROC_PIDONLY_ON) + return -ENOENT; + /* * Ensure that * 1) PDE's ->release hook will be called no matter what diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 8c1f1bb1a5ce..ecc63ce01be7 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -103,11 +103,14 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "SUnreclaim: ", sunreclaim); seq_printf(m, "KernelStack: %8lu kB\n", global_zone_page_state(NR_KERNEL_STACK_KB)); +#ifdef CONFIG_SHADOW_CALL_STACK + seq_printf(m, "ShadowCallStack:%8lu kB\n", + global_zone_page_state(NR_KERNEL_SCS_KB)); +#endif 
show_val_kb(m, "PageTables: ", global_zone_page_state(NR_PAGETABLE)); - show_val_kb(m, "NFS_Unstable: ", - global_node_page_state(NR_UNSTABLE_NFS)); + show_val_kb(m, "NFS_Unstable: ", 0); show_val_kb(m, "Bounce: ", global_zone_page_state(NR_BOUNCE)); show_val_kb(m, "WritebackTmp: ", diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 4888c5224442..dba63b2429f0 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -98,6 +98,25 @@ static const struct proc_ops proc_net_seq_ops = { .proc_release = seq_release_net, }; +int bpf_iter_init_seq_net(void *priv_data) +{ +#ifdef CONFIG_NET_NS + struct seq_net_private *p = priv_data; + + p->net = get_net(current->nsproxy->net_ns); +#endif + return 0; +} + +void bpf_iter_fini_seq_net(void *priv_data) +{ +#ifdef CONFIG_NET_NS + struct seq_net_private *p = priv_data; + + put_net(p->net); +#endif +} + struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct seq_operations *ops, unsigned int state_size, void *data) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index b6f5d459b087..df2143e05c57 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -539,13 +539,13 @@ out: return err; } -static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, +static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf, size_t count, loff_t *ppos, int write) { struct inode *inode = file_inode(filp); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; - void *new_buf = NULL; + void *kbuf; ssize_t error; if (IS_ERR(head)) @@ -564,27 +564,38 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, if (!table->proc_handler) goto out; - error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count, - ppos, &new_buf); + if (write) { + kbuf = memdup_user_nul(ubuf, count); + if (IS_ERR(kbuf)) { + error = PTR_ERR(kbuf); + goto out; + } + } else { + error = -ENOMEM; + kbuf = kzalloc(count, GFP_KERNEL); + if (!kbuf) + goto out; + } + + error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count, + ppos); if (error) - goto out; + goto out_free_buf; /* careful: calling conventions are nasty here */ - if (new_buf) { - mm_segment_t old_fs; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - error = table->proc_handler(table, write, (void __user *)new_buf, - &count, ppos); - set_fs(old_fs); - kfree(new_buf); - } else { - error = table->proc_handler(table, write, buf, &count, ppos); + error = table->proc_handler(table, write, kbuf, &count, ppos); + if (error) + goto out_free_buf; + + if (!write) { + error = -EFAULT; + if (copy_to_user(ubuf, kbuf, count)) + goto out_free_buf; } - if (!error) - error = count; + error = count; +out_free_buf: + kfree(kbuf); out: sysctl_head_finish(head); diff --git a/fs/proc/root.c b/fs/proc/root.c index cdbe9293ea55..ffebed1999e5 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -32,21 +32,86 @@ struct proc_fs_context { struct pid_namespace *pid_ns; unsigned int mask; - int hidepid; + enum proc_hidepid hidepid; int gid; + enum proc_pidonly pidonly; }; enum proc_param { Opt_gid, Opt_hidepid, + Opt_subset, }; static const struct fs_parameter_spec proc_fs_parameters[] = { fsparam_u32("gid", Opt_gid), - fsparam_u32("hidepid", Opt_hidepid), + fsparam_string("hidepid", Opt_hidepid), + fsparam_string("subset", Opt_subset), {} }; +static inline int valid_hidepid(unsigned int value) +{ + return (value == HIDEPID_OFF || + value == HIDEPID_NO_ACCESS || + value 
== HIDEPID_INVISIBLE || + value == HIDEPID_NOT_PTRACEABLE); +} + +static int proc_parse_hidepid_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct proc_fs_context *ctx = fc->fs_private; + struct fs_parameter_spec hidepid_u32_spec = fsparam_u32("hidepid", Opt_hidepid); + struct fs_parse_result result; + int base = (unsigned long)hidepid_u32_spec.data; + + if (param->type != fs_value_is_string) + return invalf(fc, "proc: unexpected type of hidepid value\n"); + + if (!kstrtouint(param->string, base, &result.uint_32)) { + if (!valid_hidepid(result.uint_32)) + return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string); + ctx->hidepid = result.uint_32; + return 0; + } + + if (!strcmp(param->string, "off")) + ctx->hidepid = HIDEPID_OFF; + else if (!strcmp(param->string, "noaccess")) + ctx->hidepid = HIDEPID_NO_ACCESS; + else if (!strcmp(param->string, "invisible")) + ctx->hidepid = HIDEPID_INVISIBLE; + else if (!strcmp(param->string, "ptraceable")) + ctx->hidepid = HIDEPID_NOT_PTRACEABLE; + else + return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string); + + return 0; +} + +static int proc_parse_subset_param(struct fs_context *fc, char *value) +{ + struct proc_fs_context *ctx = fc->fs_private; + + while (value) { + char *ptr = strchr(value, ','); + + if (ptr != NULL) + *ptr++ = '\0'; + + if (*value != '\0') { + if (!strcmp(value, "pid")) { + ctx->pidonly = PROC_PIDONLY_ON; + } else { + return invalf(fc, "proc: unsupported subset option - %s\n", value); + } + } + value = ptr; + } + + return 0; +} + static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct proc_fs_context *ctx = fc->fs_private; @@ -63,10 +128,13 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param) break; case Opt_hidepid: - ctx->hidepid = result.uint_32; - if (ctx->hidepid < HIDEPID_OFF || - ctx->hidepid > HIDEPID_INVISIBLE) - return invalfc(fc, "hidepid value must be between 0 and 2.\n"); + if (proc_parse_hidepid_param(fc, param)) + return -EINVAL; + break; + + case Opt_subset: + if (proc_parse_subset_param(fc, param->string) < 0) + return -EINVAL; break; default: @@ -77,26 +145,33 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param) return 0; } -static void proc_apply_options(struct super_block *s, +static void proc_apply_options(struct proc_fs_info *fs_info, struct fs_context *fc, - struct pid_namespace *pid_ns, struct user_namespace *user_ns) { struct proc_fs_context *ctx = fc->fs_private; if (ctx->mask & (1 << Opt_gid)) - pid_ns->pid_gid = make_kgid(user_ns, ctx->gid); + fs_info->pid_gid = make_kgid(user_ns, ctx->gid); if (ctx->mask & (1 << Opt_hidepid)) - pid_ns->hide_pid = ctx->hidepid; + fs_info->hide_pid = ctx->hidepid; + if (ctx->mask & (1 << Opt_subset)) + fs_info->pidonly = ctx->pidonly; } static int proc_fill_super(struct super_block *s, struct fs_context *fc) { - struct pid_namespace *pid_ns = get_pid_ns(s->s_fs_info); + struct proc_fs_context *ctx = fc->fs_private; struct inode *root_inode; + struct proc_fs_info *fs_info; int ret; - proc_apply_options(s, fc, pid_ns, current_user_ns()); + fs_info = kzalloc(sizeof(*fs_info), GFP_KERNEL); + if (!fs_info) + return -ENOMEM; + + fs_info->pid_ns = get_pid_ns(ctx->pid_ns); + proc_apply_options(fs_info, fc, current_user_ns()); /* User space would break if executables or devices appear on proc */ s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV; @@ -106,6 +181,7 @@ static int proc_fill_super(struct super_block *s, struct 
fs_context *fc) s->s_magic = PROC_SUPER_MAGIC; s->s_op = &proc_sops; s->s_time_gran = 1; + s->s_fs_info = fs_info; /* * procfs isn't actually a stacking filesystem; however, there is @@ -113,7 +189,7 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc) * top of it */ s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; - + /* procfs dentries and inodes don't require IO to create */ s->s_shrink.seeks = 0; @@ -140,19 +216,17 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc) static int proc_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; - struct pid_namespace *pid = sb->s_fs_info; + struct proc_fs_info *fs_info = proc_sb_info(sb); sync_filesystem(sb); - proc_apply_options(sb, fc, pid, current_user_ns()); + proc_apply_options(fs_info, fc, current_user_ns()); return 0; } static int proc_get_tree(struct fs_context *fc) { - struct proc_fs_context *ctx = fc->fs_private; - - return get_tree_keyed(fc, proc_fill_super, ctx->pid_ns); + return get_tree_nodev(fc, proc_fill_super); } static void proc_fs_context_free(struct fs_context *fc) @@ -188,22 +262,17 @@ static int proc_init_fs_context(struct fs_context *fc) static void proc_kill_sb(struct super_block *sb) { - struct pid_namespace *ns; + struct proc_fs_info *fs_info = proc_sb_info(sb); - ns = (struct pid_namespace *)sb->s_fs_info; - if (ns->proc_self) - dput(ns->proc_self); - if (ns->proc_thread_self) - dput(ns->proc_thread_self); - kill_anon_super(sb); + if (fs_info->proc_self) + dput(fs_info->proc_self); - /* Make the pid namespace safe for the next mount of proc */ - ns->proc_self = NULL; - ns->proc_thread_self = NULL; - ns->pid_gid = GLOBAL_ROOT_GID; - ns->hide_pid = 0; + if (fs_info->proc_thread_self) + dput(fs_info->proc_thread_self); - put_pid_ns(ns); + kill_anon_super(sb); + put_pid_ns(fs_info->pid_ns); + kfree(fs_info); } static struct file_system_type proc_fs_type = { diff --git a/fs/proc/self.c b/fs/proc/self.c index 57c0a1047250..ca5158fa561c 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -12,7 +12,7 @@ static const char *proc_self_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { - struct pid_namespace *ns = proc_pid_ns(inode); + struct pid_namespace *ns = proc_pid_ns(inode->i_sb); pid_t tgid = task_tgid_nr_ns(current, ns); char *name; @@ -36,10 +36,10 @@ static unsigned self_inum __ro_after_init; int proc_setup_self(struct super_block *s) { struct inode *root_inode = d_inode(s->s_root); - struct pid_namespace *ns = proc_pid_ns(root_inode); + struct proc_fs_info *fs_info = proc_sb_info(s); struct dentry *self; int ret = -ENOMEM; - + inode_lock(root_inode); self = d_alloc_name(s->s_root, "self"); if (self) { @@ -62,7 +62,7 @@ int proc_setup_self(struct super_block *s) if (ret) pr_err("proc_fill_super: can't allocate /proc/self\n"); else - ns->proc_self = self; + fs_info->proc_self = self; return ret; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 8d382d4ec067..6ad407d5efe2 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -546,10 +546,17 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); - struct page *page; + struct page *page = NULL; - /* FOLL_DUMP will return -EFAULT on huge zero page */ - page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); + if (pmd_present(*pmd)) { + /* FOLL_DUMP will return -EFAULT on huge zero page */ + page = follow_trans_huge_pmd(vma, 
addr, pmd, FOLL_DUMP); + } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { + swp_entry_t entry = pmd_to_swp_entry(*pmd); + + if (is_migration_entry(entry)) + page = migration_entry_to_page(entry); + } if (IS_ERR_OR_NULL(page)) return; if (PageAnon(page)) @@ -578,8 +585,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { - if (pmd_present(*pmd)) - smaps_pmd_entry(pmd, addr, walk); + smaps_pmd_entry(pmd, addr, walk); spin_unlock(ptl); goto out; } @@ -622,9 +628,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) [ilog2(VM_GROWSDOWN)] = "gd", [ilog2(VM_PFNMAP)] = "pf", [ilog2(VM_DENYWRITE)] = "dw", -#ifdef CONFIG_X86_INTEL_MPX - [ilog2(VM_MPX)] = "mp", -#endif [ilog2(VM_LOCKED)] = "lo", [ilog2(VM_IO)] = "io", [ilog2(VM_SEQ_READ)] = "sr", @@ -638,6 +641,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) [ilog2(VM_ARCH_1)] = "ar", [ilog2(VM_WIPEONFORK)] = "wf", [ilog2(VM_DONTDUMP)] = "dd", +#ifdef CONFIG_ARM64_BTI + [ilog2(VM_ARM64_BTI)] = "bt", +#endif #ifdef CONFIG_MEM_SOFT_DIRTY [ilog2(VM_SOFTDIRTY)] = "sd", #endif diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index f61ae53533f5..ac284f409568 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -12,7 +12,7 @@ static const char *proc_thread_self_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { - struct pid_namespace *ns = proc_pid_ns(inode); + struct pid_namespace *ns = proc_pid_ns(inode->i_sb); pid_t tgid = task_tgid_nr_ns(current, ns); pid_t pid = task_pid_nr_ns(current, ns); char *name; @@ -36,7 +36,7 @@ static unsigned thread_self_inum __ro_after_init; int proc_setup_thread_self(struct super_block *s) { struct inode *root_inode = d_inode(s->s_root); - struct pid_namespace *ns = proc_pid_ns(root_inode); + struct proc_fs_info *fs_info = proc_sb_info(s); struct dentry *thread_self; int ret = -ENOMEM; @@ -60,9 +60,9 @@ int proc_setup_thread_self(struct super_block *s) inode_unlock(root_inode); if (ret) - pr_err("proc_fill_super: can't allocate /proc/thread_self\n"); + pr_err("proc_fill_super: can't allocate /proc/thread-self\n"); else - ns->proc_thread_self = thread_self; + fs_info->proc_thread_self = thread_self; return ret; } diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 273ee82d8aa9..3059a9394c2d 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -37,23 +37,23 @@ static __poll_t mounts_poll(struct file *file, poll_table *wait) return res; } -struct proc_fs_info { +struct proc_fs_opts { int flag; const char *str; }; static int show_sb_opts(struct seq_file *m, struct super_block *sb) { - static const struct proc_fs_info fs_info[] = { + static const struct proc_fs_opts fs_opts[] = { { SB_SYNCHRONOUS, ",sync" }, { SB_DIRSYNC, ",dirsync" }, { SB_MANDLOCK, ",mand" }, { SB_LAZYTIME, ",lazytime" }, { 0, NULL } }; - const struct proc_fs_info *fs_infop; + const struct proc_fs_opts *fs_infop; - for (fs_infop = fs_info; fs_infop->flag; fs_infop++) { + for (fs_infop = fs_opts; fs_infop->flag; fs_infop++) { if (sb->s_flags & fs_infop->flag) seq_puts(m, fs_infop->str); } @@ -63,7 +63,7 @@ static int show_sb_opts(struct seq_file *m, struct super_block *sb) static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt) { - static const struct proc_fs_info mnt_info[] = { + static const struct proc_fs_opts mnt_opts[] = { { MNT_NOSUID, ",nosuid" }, { MNT_NODEV, ",nodev" }, { MNT_NOEXEC, ",noexec" }, @@ 
-72,9 +72,9 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt) { MNT_RELATIME, ",relatime" }, { 0, NULL } }; - const struct proc_fs_info *fs_infop; + const struct proc_fs_opts *fs_infop; - for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) { + for (fs_infop = mnt_opts; fs_infop->flag; fs_infop++) { if (mnt->mnt_flags & fs_infop->flag) seq_puts(m, fs_infop->str); } @@ -279,7 +279,8 @@ static int mounts_open_common(struct inode *inode, struct file *file, p->ns = ns; p->root = root; p->show = show; - p->cached_event = ~0ULL; + INIT_LIST_HEAD(&p->cursor.mnt_list); + p->cursor.mnt.mnt_flags = MNT_CURSOR; return 0; @@ -296,6 +297,7 @@ static int mounts_release(struct inode *inode, struct file *file) struct seq_file *m = file->private_data; struct proc_mounts *p = m->private; path_put(&p->root); + mnt_cursor_del(p->ns, &p->cursor); put_mnt_ns(p->ns); return seq_release_private(inode, file); } diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 8f0369aad22a..e16a49ebfe54 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig @@ -153,3 +153,112 @@ config PSTORE_RAM "ramoops.ko". For more information, see Documentation/admin-guide/ramoops.rst. + +config PSTORE_ZONE + tristate + depends on PSTORE + help + The common layer for pstore/blk (and pstore/ram in the future) + to manage storage in zones. + +config PSTORE_BLK + tristate "Log panic/oops to a block device" + depends on PSTORE + depends on BLOCK + select PSTORE_ZONE + default n + help + This enables panic and oops message to be logged to a block dev + where it can be read back at some later point. + + For more information, see Documentation/admin-guide/pstore-blk.rst + + If unsure, say N. + +config PSTORE_BLK_BLKDEV + string "block device identifier" + depends on PSTORE_BLK + default "" + help + Which block device should be used for pstore/blk. + + It accepts the following variants: + 1) <hex_major><hex_minor> device number in hexadecimal representation, + with no leading 0x, for example b302. + 2) /dev/<disk_name> represents the device name of disk + 3) /dev/<disk_name><decimal> represents the device name and number + of partition - device number of disk plus the partition number + 4) /dev/<disk_name>p<decimal> - same as the above, this form is + used when disk name of partitioned disk ends with a digit. + 5) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the + unique id of a partition if the partition table provides it. + The UUID may be either an EFI/GPT UUID, or refer to an MSDOS + partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero- + filled hex representation of the 32-bit "NT disk signature", and PP + is a zero-filled hex representation of the 1-based partition number. + 6) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation + to a partition with a known unique id. + 7) <major>:<minor> major and minor number of the device separated by + a colon. + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. + +config PSTORE_BLK_KMSG_SIZE + int "Size in Kbytes of kmsg dump log to store" + depends on PSTORE_BLK + default 64 + help + This just sets size of kmsg dump (oops, panic, etc) log for + pstore/blk. The size is in KB and must be a multiple of 4. + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. 
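
Editor's aside: the PSTORE_BLK options above and below only size and select the storage; some driver still has to hand pstore/blk an actual device. A minimal sketch of that registration is below, built from the register_pstore_device() API and struct pstore_device_info that this patch adds in fs/pstore/blk.c further down. The names my_psblk_read(), my_psblk_write(), my_psblk_dev and the 1 MiB size are hypothetical illustrations, not part of the patch; only the structure fields, the "flags == 0 means no limit" behaviour, and the register/unregister calls are taken from the diff.

        // Hedged sketch, not from the patch: a driver registering a backing
        // store with pstore/blk. Callback names and sizes are assumptions.
        #include <linux/module.h>
        #include <linux/pstore_blk.h>

        /* Hypothetical storage callbacks; a real driver would do I/O here. */
        static ssize_t my_psblk_read(char *buf, size_t bytes, loff_t pos)
        {
                /* ... copy @bytes from the backing store at @pos into @buf ... */
                return bytes;
        }

        static ssize_t my_psblk_write(const char *buf, size_t bytes, loff_t pos)
        {
                /* ... persist @bytes from @buf at offset @pos ... */
                return bytes;
        }

        static struct pstore_device_info my_psblk_dev = {
                .total_size = 1024 * 1024,      /* assumed: 1 MiB backing region */
                .flags      = 0,                /* 0 = "no limit": pstore/blk may use all frontends */
                .read       = my_psblk_read,
                .write      = my_psblk_write,
                /* .erase and .panic_write are optional per __register_pstore_device() */
        };

        static int __init my_psblk_init(void)
        {
                return register_pstore_device(&my_psblk_dev);
        }
        module_init(my_psblk_init);

        static void __exit my_psblk_exit(void)
        {
                unregister_pstore_device(&my_psblk_dev);
        }
        module_exit(my_psblk_exit);

        MODULE_LICENSE("GPL");

Once such a device is registered, the kmsg/pmsg/console/ftrace sizes configured by the Kconfig entries in this hunk (or by the equivalent module parameters, which the help text says take priority) determine how pstore/zone carves the registered region into zones.
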
+ +config PSTORE_BLK_MAX_REASON + int "Maximum kmsg dump reason to store" + depends on PSTORE_BLK + default 2 + help + The maximum reason for kmsg dumps to store. The default is + 2 (KMSG_DUMP_OOPS), see include/linux/kmsg_dump.h's + enum kmsg_dump_reason for more details. + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. + +config PSTORE_BLK_PMSG_SIZE + int "Size in Kbytes of pmsg to store" + depends on PSTORE_BLK + depends on PSTORE_PMSG + default 64 + help + This just sets size of pmsg (pmsg_size) for pstore/blk. The size is + in KB and must be a multiple of 4. + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. + +config PSTORE_BLK_CONSOLE_SIZE + int "Size in Kbytes of console log to store" + depends on PSTORE_BLK + depends on PSTORE_CONSOLE + default 64 + help + This just sets size of console log (console_size) to store via + pstore/blk. The size is in KB and must be a multiple of 4. + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. + +config PSTORE_BLK_FTRACE_SIZE + int "Size in Kbytes of ftrace log to store" + depends on PSTORE_BLK + depends on PSTORE_FTRACE + default 64 + help + This just sets size of ftrace log (ftrace_size) for pstore/blk. The + size is in KB and must be a multiple of 4. + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile index 967b5891f325..c270467aeece 100644 --- a/fs/pstore/Makefile +++ b/fs/pstore/Makefile @@ -12,3 +12,9 @@ pstore-$(CONFIG_PSTORE_PMSG) += pmsg.o ramoops-objs += ram.o ram_core.o obj-$(CONFIG_PSTORE_RAM) += ramoops.o + +pstore_zone-objs += zone.o +obj-$(CONFIG_PSTORE_ZONE) += pstore_zone.o + +pstore_blk-objs += blk.o +obj-$(CONFIG_PSTORE_BLK) += pstore_blk.o diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c new file mode 100644 index 000000000000..fcd5563dde06 --- /dev/null +++ b/fs/pstore/blk.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implements pstore backend driver that write to block (or non-block) storage + * devices, using the pstore/zone API. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include "../../block/blk.h" +#include <linux/blkdev.h> +#include <linux/string.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/pstore_blk.h> +#include <linux/mount.h> +#include <linux/uio.h> + +static long kmsg_size = CONFIG_PSTORE_BLK_KMSG_SIZE; +module_param(kmsg_size, long, 0400); +MODULE_PARM_DESC(kmsg_size, "kmsg dump record size in kbytes"); + +static int max_reason = CONFIG_PSTORE_BLK_MAX_REASON; +module_param(max_reason, int, 0400); +MODULE_PARM_DESC(max_reason, + "maximum reason for kmsg dump (default 2: Oops and Panic)"); + +#if IS_ENABLED(CONFIG_PSTORE_PMSG) +static long pmsg_size = CONFIG_PSTORE_BLK_PMSG_SIZE; +#else +static long pmsg_size = -1; +#endif +module_param(pmsg_size, long, 0400); +MODULE_PARM_DESC(pmsg_size, "pmsg size in kbytes"); + +#if IS_ENABLED(CONFIG_PSTORE_CONSOLE) +static long console_size = CONFIG_PSTORE_BLK_CONSOLE_SIZE; +#else +static long console_size = -1; +#endif +module_param(console_size, long, 0400); +MODULE_PARM_DESC(console_size, "console size in kbytes"); + +#if IS_ENABLED(CONFIG_PSTORE_FTRACE) +static long ftrace_size = CONFIG_PSTORE_BLK_FTRACE_SIZE; +#else +static long ftrace_size = -1; +#endif +module_param(ftrace_size, long, 0400); +MODULE_PARM_DESC(ftrace_size, "ftrace size in kbytes"); + +static bool best_effort; +module_param(best_effort, bool, 0400); +MODULE_PARM_DESC(best_effort, "use best effort to write (i.e. do not require storage driver pstore support, default: off)"); + +/* + * blkdev - the block device to use for pstore storage + * + * Usually, this will be a partition of a block device. + * + * blkdev accepts the following variants: + * 1) <hex_major><hex_minor> device number in hexadecimal representation, + * with no leading 0x, for example b302. + * 2) /dev/<disk_name> represents the device number of disk + * 3) /dev/<disk_name><decimal> represents the device number + * of partition - device number of disk plus the partition number + * 4) /dev/<disk_name>p<decimal> - same as the above, that form is + * used when disk name of partitioned disk ends on a digit. + * 5) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the + * unique id of a partition if the partition table provides it. + * The UUID may be either an EFI/GPT UUID, or refer to an MSDOS + * partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero- + * filled hex representation of the 32-bit "NT disk signature", and PP + * is a zero-filled hex representation of the 1-based partition number. + * 6) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation to + * a partition with a known unique id. + * 7) <major>:<minor> major and minor number of the device separated by + * a colon. + */ +static char blkdev[80] = CONFIG_PSTORE_BLK_BLKDEV; +module_param_string(blkdev, blkdev, 80, 0400); +MODULE_PARM_DESC(blkdev, "block device for pstore storage"); + +/* + * All globals must only be accessed under the pstore_blk_lock + * during the register/unregister functions. + */ +static DEFINE_MUTEX(pstore_blk_lock); +static struct block_device *psblk_bdev; +static struct pstore_zone_info *pstore_zone_info; +static pstore_blk_panic_write_op blkdev_panic_write; + +struct bdev_info { + dev_t devt; + sector_t nr_sects; + sector_t start_sect; +}; + +#define check_size(name, alignsize) ({ \ + long _##name_ = (name); \ + _##name_ = _##name_ <= 0 ? 
0 : (_##name_ * 1024); \ + if (_##name_ & ((alignsize) - 1)) { \ + pr_info(#name " must align to %d\n", \ + (alignsize)); \ + _##name_ = ALIGN(name, (alignsize)); \ + } \ + _##name_; \ +}) + +static int __register_pstore_device(struct pstore_device_info *dev) +{ + int ret; + + lockdep_assert_held(&pstore_blk_lock); + + if (!dev || !dev->total_size || !dev->read || !dev->write) + return -EINVAL; + + /* someone already registered before */ + if (pstore_zone_info) + return -EBUSY; + + pstore_zone_info = kzalloc(sizeof(struct pstore_zone_info), GFP_KERNEL); + if (!pstore_zone_info) + return -ENOMEM; + + /* zero means not limit on which backends to attempt to store. */ + if (!dev->flags) + dev->flags = UINT_MAX; + +#define verify_size(name, alignsize, enabled) { \ + long _##name_; \ + if (enabled) \ + _##name_ = check_size(name, alignsize); \ + else \ + _##name_ = 0; \ + name = _##name_ / 1024; \ + pstore_zone_info->name = _##name_; \ + } + + verify_size(kmsg_size, 4096, dev->flags & PSTORE_FLAGS_DMESG); + verify_size(pmsg_size, 4096, dev->flags & PSTORE_FLAGS_PMSG); + verify_size(console_size, 4096, dev->flags & PSTORE_FLAGS_CONSOLE); + verify_size(ftrace_size, 4096, dev->flags & PSTORE_FLAGS_FTRACE); +#undef verify_size + + pstore_zone_info->total_size = dev->total_size; + pstore_zone_info->max_reason = max_reason; + pstore_zone_info->read = dev->read; + pstore_zone_info->write = dev->write; + pstore_zone_info->erase = dev->erase; + pstore_zone_info->panic_write = dev->panic_write; + pstore_zone_info->name = KBUILD_MODNAME; + pstore_zone_info->owner = THIS_MODULE; + + ret = register_pstore_zone(pstore_zone_info); + if (ret) { + kfree(pstore_zone_info); + pstore_zone_info = NULL; + } + return ret; +} +/** + * register_pstore_device() - register non-block device to pstore/blk + * + * @dev: non-block device information + * + * Return: + * * 0 - OK + * * Others - something error. + */ +int register_pstore_device(struct pstore_device_info *dev) +{ + int ret; + + mutex_lock(&pstore_blk_lock); + ret = __register_pstore_device(dev); + mutex_unlock(&pstore_blk_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(register_pstore_device); + +static void __unregister_pstore_device(struct pstore_device_info *dev) +{ + lockdep_assert_held(&pstore_blk_lock); + if (pstore_zone_info && pstore_zone_info->read == dev->read) { + unregister_pstore_zone(pstore_zone_info); + kfree(pstore_zone_info); + pstore_zone_info = NULL; + } +} + +/** + * unregister_pstore_device() - unregister non-block device from pstore/blk + * + * @dev: non-block device information + */ +void unregister_pstore_device(struct pstore_device_info *dev) +{ + mutex_lock(&pstore_blk_lock); + __unregister_pstore_device(dev); + mutex_unlock(&pstore_blk_lock); +} +EXPORT_SYMBOL_GPL(unregister_pstore_device); + +/** + * psblk_get_bdev() - open block device + * + * @holder: Exclusive holder identifier + * @info: Information about bdev to fill in + * + * Return: pointer to block device on success and others on error. + * + * On success, the returned block_device has reference count of one. 
+ */ +static struct block_device *psblk_get_bdev(void *holder, + struct bdev_info *info) +{ + struct block_device *bdev = ERR_PTR(-ENODEV); + fmode_t mode = FMODE_READ | FMODE_WRITE; + sector_t nr_sects; + + lockdep_assert_held(&pstore_blk_lock); + + if (pstore_zone_info) + return ERR_PTR(-EBUSY); + + if (!blkdev[0]) + return ERR_PTR(-ENODEV); + + if (holder) + mode |= FMODE_EXCL; + bdev = blkdev_get_by_path(blkdev, mode, holder); + if (IS_ERR(bdev)) { + dev_t devt; + + devt = name_to_dev_t(blkdev); + if (devt == 0) + return ERR_PTR(-ENODEV); + bdev = blkdev_get_by_dev(devt, mode, holder); + if (IS_ERR(bdev)) + return bdev; + } + + nr_sects = part_nr_sects_read(bdev->bd_part); + if (!nr_sects) { + pr_err("not enough space for '%s'\n", blkdev); + blkdev_put(bdev, mode); + return ERR_PTR(-ENOSPC); + } + + if (info) { + info->devt = bdev->bd_dev; + info->nr_sects = nr_sects; + info->start_sect = get_start_sect(bdev); + } + + return bdev; +} + +static void psblk_put_bdev(struct block_device *bdev, void *holder) +{ + fmode_t mode = FMODE_READ | FMODE_WRITE; + + lockdep_assert_held(&pstore_blk_lock); + + if (!bdev) + return; + + if (holder) + mode |= FMODE_EXCL; + blkdev_put(bdev, mode); +} + +static ssize_t psblk_generic_blk_read(char *buf, size_t bytes, loff_t pos) +{ + struct block_device *bdev = psblk_bdev; + struct file file; + struct kiocb kiocb; + struct iov_iter iter; + struct kvec iov = {.iov_base = buf, .iov_len = bytes}; + + if (!bdev) + return -ENODEV; + + memset(&file, 0, sizeof(struct file)); + file.f_mapping = bdev->bd_inode->i_mapping; + file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME; + file.f_inode = bdev->bd_inode; + file_ra_state_init(&file.f_ra, file.f_mapping); + + init_sync_kiocb(&kiocb, &file); + kiocb.ki_pos = pos; + iov_iter_kvec(&iter, READ, &iov, 1, bytes); + + return generic_file_read_iter(&kiocb, &iter); +} + +static ssize_t psblk_generic_blk_write(const char *buf, size_t bytes, + loff_t pos) +{ + struct block_device *bdev = psblk_bdev; + struct iov_iter iter; + struct kiocb kiocb; + struct file file; + ssize_t ret; + struct kvec iov = {.iov_base = (void *)buf, .iov_len = bytes}; + + if (!bdev) + return -ENODEV; + + /* Console/Ftrace backend may handle buffer until flush dirty zones */ + if (in_interrupt() || irqs_disabled()) + return -EBUSY; + + memset(&file, 0, sizeof(struct file)); + file.f_mapping = bdev->bd_inode->i_mapping; + file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME; + file.f_inode = bdev->bd_inode; + + init_sync_kiocb(&kiocb, &file); + kiocb.ki_pos = pos; + iov_iter_kvec(&iter, WRITE, &iov, 1, bytes); + + inode_lock(bdev->bd_inode); + ret = generic_write_checks(&kiocb, &iter); + if (ret > 0) + ret = generic_perform_write(&file, &iter, pos); + inode_unlock(bdev->bd_inode); + + if (likely(ret > 0)) { + const struct file_operations f_op = {.fsync = blkdev_fsync}; + + file.f_op = &f_op; + kiocb.ki_pos += ret; + ret = generic_write_sync(&kiocb, ret); + } + return ret; +} + +static ssize_t psblk_blk_panic_write(const char *buf, size_t size, + loff_t off) +{ + int ret; + + if (!blkdev_panic_write) + return -EOPNOTSUPP; + + /* size and off must align to SECTOR_SIZE for block device */ + ret = blkdev_panic_write(buf, off >> SECTOR_SHIFT, + size >> SECTOR_SHIFT); + /* try next zone */ + if (ret == -ENOMSG) + return ret; + return ret ? 
-EIO : size; +} + +static int __register_pstore_blk(struct pstore_blk_info *info) +{ + char bdev_name[BDEVNAME_SIZE]; + struct block_device *bdev; + struct pstore_device_info dev; + struct bdev_info binfo; + void *holder = blkdev; + int ret = -ENODEV; + + lockdep_assert_held(&pstore_blk_lock); + + /* hold bdev exclusively */ + memset(&binfo, 0, sizeof(binfo)); + bdev = psblk_get_bdev(holder, &binfo); + if (IS_ERR(bdev)) { + pr_err("failed to open '%s'!\n", blkdev); + return PTR_ERR(bdev); + } + + /* only allow driver matching the @blkdev */ + if (!binfo.devt || (!best_effort && + MAJOR(binfo.devt) != info->major)) { + pr_debug("invalid major %u (expect %u)\n", + info->major, MAJOR(binfo.devt)); + ret = -ENODEV; + goto err_put_bdev; + } + + /* psblk_bdev must be assigned before register to pstore/blk */ + psblk_bdev = bdev; + blkdev_panic_write = info->panic_write; + + /* Copy back block device details. */ + info->devt = binfo.devt; + info->nr_sects = binfo.nr_sects; + info->start_sect = binfo.start_sect; + + memset(&dev, 0, sizeof(dev)); + dev.total_size = info->nr_sects << SECTOR_SHIFT; + dev.flags = info->flags; + dev.read = psblk_generic_blk_read; + dev.write = psblk_generic_blk_write; + dev.erase = NULL; + dev.panic_write = info->panic_write ? psblk_blk_panic_write : NULL; + + ret = __register_pstore_device(&dev); + if (ret) + goto err_put_bdev; + + bdevname(bdev, bdev_name); + pr_info("attached %s%s\n", bdev_name, + info->panic_write ? "" : " (no dedicated panic_write!)"); + return 0; + +err_put_bdev: + psblk_bdev = NULL; + blkdev_panic_write = NULL; + psblk_put_bdev(bdev, holder); + return ret; +} + +/** + * register_pstore_blk() - register block device to pstore/blk + * + * @info: details on the desired block device interface + * + * Return: + * * 0 - OK + * * Others - something error. 
+ */ +int register_pstore_blk(struct pstore_blk_info *info) +{ + int ret; + + mutex_lock(&pstore_blk_lock); + ret = __register_pstore_blk(info); + mutex_unlock(&pstore_blk_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(register_pstore_blk); + +static void __unregister_pstore_blk(unsigned int major) +{ + struct pstore_device_info dev = { .read = psblk_generic_blk_read }; + void *holder = blkdev; + + lockdep_assert_held(&pstore_blk_lock); + if (psblk_bdev && MAJOR(psblk_bdev->bd_dev) == major) { + __unregister_pstore_device(&dev); + psblk_put_bdev(psblk_bdev, holder); + blkdev_panic_write = NULL; + psblk_bdev = NULL; + } +} + +/** + * unregister_pstore_blk() - unregister block device from pstore/blk + * + * @major: the major device number of device + */ +void unregister_pstore_blk(unsigned int major) +{ + mutex_lock(&pstore_blk_lock); + __unregister_pstore_blk(major); + mutex_unlock(&pstore_blk_lock); +} +EXPORT_SYMBOL_GPL(unregister_pstore_blk); + +/* get information of pstore/blk */ +int pstore_blk_get_config(struct pstore_blk_config *info) +{ + strncpy(info->device, blkdev, 80); + info->max_reason = max_reason; + info->kmsg_size = check_size(kmsg_size, 4096); + info->pmsg_size = check_size(pmsg_size, 4096); + info->ftrace_size = check_size(ftrace_size, 4096); + info->console_size = check_size(console_size, 4096); + + return 0; +} +EXPORT_SYMBOL_GPL(pstore_blk_get_config); + +static int __init pstore_blk_init(void) +{ + struct pstore_blk_info info = { }; + int ret = 0; + + mutex_lock(&pstore_blk_lock); + if (!pstore_zone_info && best_effort && blkdev[0]) + ret = __register_pstore_blk(&info); + mutex_unlock(&pstore_blk_lock); + + return ret; +} +late_initcall(pstore_blk_init); + +static void __exit pstore_blk_exit(void) +{ + mutex_lock(&pstore_blk_lock); + if (psblk_bdev) + __unregister_pstore_blk(MAJOR(psblk_bdev->bd_dev)); + else { + struct pstore_device_info dev = { }; + + if (pstore_zone_info) + dev.read = pstore_zone_info->read; + __unregister_pstore_device(&dev); + } + mutex_unlock(&pstore_blk_lock); +} +module_exit(pstore_blk_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>"); +MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); +MODULE_DESCRIPTION("pstore backend for block devices"); diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c index bfbfc2698070..5c0450701293 100644 --- a/fs/pstore/ftrace.c +++ b/fs/pstore/ftrace.c @@ -16,6 +16,7 @@ #include <linux/debugfs.h> #include <linux/err.h> #include <linux/cache.h> +#include <linux/slab.h> #include <asm/barrier.h> #include "internal.h" @@ -132,3 +133,56 @@ void pstore_unregister_ftrace(void) debugfs_remove_recursive(pstore_ftrace_dir); } + +ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size, + const char *src_log, size_t src_log_size) +{ + size_t dest_size, src_size, total, dest_off, src_off; + size_t dest_idx = 0, src_idx = 0, merged_idx = 0; + void *merged_buf; + struct pstore_ftrace_record *drec, *srec, *mrec; + size_t record_size = sizeof(struct pstore_ftrace_record); + + dest_off = *dest_log_size % record_size; + dest_size = *dest_log_size - dest_off; + + src_off = src_log_size % record_size; + src_size = src_log_size - src_off; + + total = dest_size + src_size; + merged_buf = kmalloc(total, GFP_KERNEL); + if (!merged_buf) + return -ENOMEM; + + drec = (struct pstore_ftrace_record *)(*dest_log + dest_off); + srec = (struct pstore_ftrace_record *)(src_log + src_off); + mrec = (struct pstore_ftrace_record *)(merged_buf); + + while (dest_size > 0 && src_size > 0) { 
+ if (pstore_ftrace_read_timestamp(&drec[dest_idx]) < + pstore_ftrace_read_timestamp(&srec[src_idx])) { + mrec[merged_idx++] = drec[dest_idx++]; + dest_size -= record_size; + } else { + mrec[merged_idx++] = srec[src_idx++]; + src_size -= record_size; + } + } + + while (dest_size > 0) { + mrec[merged_idx++] = drec[dest_idx++]; + dest_size -= record_size; + } + + while (src_size > 0) { + mrec[merged_idx++] = srec[src_idx++]; + src_size -= record_size; + } + + kfree(*dest_log); + *dest_log = merged_buf; + *dest_log_size = total; + + return 0; +} +EXPORT_SYMBOL_GPL(pstore_ftrace_combine_log); diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index d99b5d39aa90..c331efe8de95 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -22,18 +22,21 @@ #include <linux/magic.h> #include <linux/pstore.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/uaccess.h> #include "internal.h" #define PSTORE_NAMELEN 64 -static DEFINE_SPINLOCK(allpstore_lock); -static LIST_HEAD(allpstore); +static DEFINE_MUTEX(records_list_lock); +static LIST_HEAD(records_list); + +static DEFINE_MUTEX(pstore_sb_lock); +static struct super_block *pstore_sb; struct pstore_private { struct list_head list; + struct dentry *dentry; struct pstore_record *record; size_t total_size; }; @@ -178,10 +181,22 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry) { struct pstore_private *p = d_inode(dentry)->i_private; struct pstore_record *record = p->record; + int rc = 0; if (!record->psi->erase) return -EPERM; + /* Make sure we can't race while removing this file. */ + mutex_lock(&records_list_lock); + if (!list_empty(&p->list)) + list_del_init(&p->list); + else + rc = -ENOENT; + p->dentry = NULL; + mutex_unlock(&records_list_lock); + if (rc) + return rc; + mutex_lock(&record->psi->read_mutex); record->psi->erase(record); mutex_unlock(&record->psi->read_mutex); @@ -192,15 +207,9 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry) static void pstore_evict_inode(struct inode *inode) { struct pstore_private *p = inode->i_private; - unsigned long flags; clear_inode(inode); - if (p) { - spin_lock_irqsave(&allpstore_lock, flags); - list_del(&p->list); - spin_unlock_irqrestore(&allpstore_lock, flags); - free_pstore_private(p); - } + free_pstore_private(p); } static const struct inode_operations pstore_dir_inode_operations = { @@ -278,11 +287,54 @@ static const struct super_operations pstore_ops = { .show_options = pstore_show_options, }; -static struct super_block *pstore_sb; +static struct dentry *psinfo_lock_root(void) +{ + struct dentry *root; -bool pstore_is_mounted(void) + mutex_lock(&pstore_sb_lock); + /* + * Having no backend is fine -- no records appear. + * Not being mounted is fine -- nothing to do. 
+ */ + if (!psinfo || !pstore_sb) { + mutex_unlock(&pstore_sb_lock); + return NULL; + } + + root = pstore_sb->s_root; + inode_lock(d_inode(root)); + mutex_unlock(&pstore_sb_lock); + + return root; +} + +int pstore_put_backend_records(struct pstore_info *psi) { - return pstore_sb != NULL; + struct pstore_private *pos, *tmp; + struct dentry *root; + int rc = 0; + + root = psinfo_lock_root(); + if (!root) + return 0; + + mutex_lock(&records_list_lock); + list_for_each_entry_safe(pos, tmp, &records_list, list) { + if (pos->record->psi == psi) { + list_del_init(&pos->list); + rc = simple_unlink(d_inode(root), pos->dentry); + if (WARN_ON(rc)) + break; + d_drop(pos->dentry); + dput(pos->dentry); + pos->dentry = NULL; + } + } + mutex_unlock(&records_list_lock); + + inode_unlock(d_inode(root)); + + return rc; } /* @@ -297,23 +349,20 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record) int rc = 0; char name[PSTORE_NAMELEN]; struct pstore_private *private, *pos; - unsigned long flags; size_t size = record->size + record->ecc_notice_size; - WARN_ON(!inode_is_locked(d_inode(root))); + if (WARN_ON(!inode_is_locked(d_inode(root)))) + return -EINVAL; - spin_lock_irqsave(&allpstore_lock, flags); - list_for_each_entry(pos, &allpstore, list) { + rc = -EEXIST; + /* Skip records that are already present in the filesystem. */ + mutex_lock(&records_list_lock); + list_for_each_entry(pos, &records_list, list) { if (pos->record->type == record->type && pos->record->id == record->id && - pos->record->psi == record->psi) { - rc = -EEXIST; - break; - } + pos->record->psi == record->psi) + goto fail; } - spin_unlock_irqrestore(&allpstore_lock, flags); - if (rc) - return rc; rc = -ENOMEM; inode = pstore_get_inode(root->d_sb); @@ -334,6 +383,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record) if (!dentry) goto fail_private; + private->dentry = dentry; private->record = record; inode->i_size = private->total_size = size; inode->i_private = private; @@ -343,9 +393,8 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record) d_add(dentry, inode); - spin_lock_irqsave(&allpstore_lock, flags); - list_add(&private->list, &allpstore); - spin_unlock_irqrestore(&allpstore_lock, flags); + list_add(&private->list, &records_list); + mutex_unlock(&records_list_lock); return 0; @@ -353,8 +402,8 @@ fail_private: free_pstore_private(private); fail_inode: iput(inode); - fail: + mutex_unlock(&records_list_lock); return rc; } @@ -366,16 +415,13 @@ fail: */ void pstore_get_records(int quiet) { - struct pstore_info *psi = psinfo; struct dentry *root; - if (!psi || !pstore_sb) + root = psinfo_lock_root(); + if (!root) return; - root = pstore_sb->s_root; - - inode_lock(d_inode(root)); - pstore_get_backend_records(psi, root, quiet); + pstore_get_backend_records(psinfo, root, quiet); inode_unlock(d_inode(root)); } @@ -383,8 +429,6 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; - pstore_sb = sb; - sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; @@ -405,6 +449,10 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent) if (!sb->s_root) return -ENOMEM; + mutex_lock(&pstore_sb_lock); + pstore_sb = sb; + mutex_unlock(&pstore_sb_lock); + pstore_get_records(0); return 0; @@ -418,8 +466,17 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type, static void pstore_kill_sb(struct super_block *sb) { + mutex_lock(&pstore_sb_lock); + WARN_ON(pstore_sb != sb); + 
kill_litter_super(sb); pstore_sb = NULL; + + mutex_lock(&records_list_lock); + INIT_LIST_HEAD(&records_list); + mutex_unlock(&records_list_lock); + + mutex_unlock(&pstore_sb_lock); } static struct file_system_type pstore_fs_type = { diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h index 7062ea4bc57c..7fb219042f13 100644 --- a/fs/pstore/internal.h +++ b/fs/pstore/internal.h @@ -12,9 +12,18 @@ extern unsigned long kmsg_bytes; #ifdef CONFIG_PSTORE_FTRACE extern void pstore_register_ftrace(void); extern void pstore_unregister_ftrace(void); +ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size, + const char *src_log, size_t src_log_size); #else static inline void pstore_register_ftrace(void) {} static inline void pstore_unregister_ftrace(void) {} +static inline ssize_t +pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size, + const char *src_log, size_t src_log_size) +{ + *dest_log_size = 0; + return 0; +} #endif #ifdef CONFIG_PSTORE_PMSG @@ -31,9 +40,9 @@ extern void pstore_set_kmsg_bytes(int); extern void pstore_get_records(int); extern void pstore_get_backend_records(struct pstore_info *psi, struct dentry *root, int quiet); +extern int pstore_put_backend_records(struct pstore_info *psi); extern int pstore_mkfile(struct dentry *root, struct pstore_record *record); -extern bool pstore_is_mounted(void); extern void pstore_record_init(struct pstore_record *record, struct pstore_info *psi); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 408277ee3cdb..a9e297eefdff 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -44,7 +44,7 @@ static int pstore_update_ms = -1; module_param_named(update_ms, pstore_update_ms, int, 0600); MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content " "(default is -1, which means runtime updates are disabled; " - "enabling this option is not safe, it may lead to further " + "enabling this option may not be safe; it may lead to further " "corruption on Oopses)"); /* Names should be in the same order as the enum pstore_type_id */ @@ -69,19 +69,25 @@ static void pstore_dowork(struct work_struct *); static DECLARE_WORK(pstore_work, pstore_dowork); /* - * pstore_lock just protects "psinfo" during - * calls to pstore_register() + * psinfo_lock protects "psinfo" during calls to + * pstore_register(), pstore_unregister(), and + * the filesystem mount/unmount routines. 
*/ -static DEFINE_SPINLOCK(pstore_lock); +static DEFINE_MUTEX(psinfo_lock); struct pstore_info *psinfo; static char *backend; +module_param(backend, charp, 0444); +MODULE_PARM_DESC(backend, "specific backend to use"); + static char *compress = #ifdef CONFIG_PSTORE_COMPRESS_DEFAULT CONFIG_PSTORE_COMPRESS_DEFAULT; #else NULL; #endif +module_param(compress, charp, 0444); +MODULE_PARM_DESC(compress, "compression to use"); /* Compression parameters */ static struct crypto_comp *tfm; @@ -129,24 +135,12 @@ enum pstore_type_id pstore_name_to_type(const char *name) } EXPORT_SYMBOL_GPL(pstore_name_to_type); -static const char *get_reason_str(enum kmsg_dump_reason reason) +static void pstore_timer_kick(void) { - switch (reason) { - case KMSG_DUMP_PANIC: - return "Panic"; - case KMSG_DUMP_OOPS: - return "Oops"; - case KMSG_DUMP_EMERG: - return "Emergency"; - case KMSG_DUMP_RESTART: - return "Restart"; - case KMSG_DUMP_HALT: - return "Halt"; - case KMSG_DUMP_POWEROFF: - return "Poweroff"; - default: - return "Unknown"; - } + if (pstore_update_ms < 0) + return; + + mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); } /* @@ -393,7 +387,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, unsigned int part = 1; int ret; - why = get_reason_str(reason); + why = kmsg_dump_reason_str(reason); if (down_trylock(&psinfo->buf_lock)) { /* Failed to acquire lock: give up if we cannot wait. */ @@ -459,8 +453,10 @@ static void pstore_dump(struct kmsg_dumper *dumper, } ret = psinfo->write(&record); - if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) + if (ret == 0 && reason == KMSG_DUMP_OOPS) { pstore_new_entry = 1; + pstore_timer_kick(); + } total += record.size; part++; @@ -503,14 +499,20 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) } static struct console pstore_console = { - .name = "pstore", .write = pstore_console_write, - .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME, .index = -1, }; static void pstore_register_console(void) { + /* Show which backend is going to get console writes. */ + strscpy(pstore_console.name, psinfo->name, + sizeof(pstore_console.name)); + /* + * Always initialize flags here since prior unregister_console() + * calls may have changed settings (specifically CON_ENABLED). 
+ */ + pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME; register_console(&pstore_console); } @@ -555,8 +557,6 @@ out: */ int pstore_register(struct pstore_info *psi) { - struct module *owner = psi->owner; - if (backend && strcmp(backend, psi->name)) { pr_warn("ignoring unexpected backend '%s'\n", psi->name); return -EPERM; @@ -576,11 +576,11 @@ int pstore_register(struct pstore_info *psi) return -EINVAL; } - spin_lock(&pstore_lock); + mutex_lock(&psinfo_lock); if (psinfo) { pr_warn("backend '%s' already loaded: ignoring '%s'\n", psinfo->name, psi->name); - spin_unlock(&pstore_lock); + mutex_unlock(&psinfo_lock); return -EBUSY; } @@ -589,21 +589,16 @@ int pstore_register(struct pstore_info *psi) psinfo = psi; mutex_init(&psinfo->read_mutex); sema_init(&psinfo->buf_lock, 1); - spin_unlock(&pstore_lock); - - if (owner && !try_module_get(owner)) { - psinfo = NULL; - return -EINVAL; - } if (psi->flags & PSTORE_FLAGS_DMESG) allocate_buf_for_compression(); - if (pstore_is_mounted()) - pstore_get_records(0); + pstore_get_records(0); - if (psi->flags & PSTORE_FLAGS_DMESG) + if (psi->flags & PSTORE_FLAGS_DMESG) { + pstore_dumper.max_reason = psinfo->max_reason; pstore_register_kmsg(); + } if (psi->flags & PSTORE_FLAGS_CONSOLE) pstore_register_console(); if (psi->flags & PSTORE_FLAGS_FTRACE) @@ -612,33 +607,36 @@ int pstore_register(struct pstore_info *psi) pstore_register_pmsg(); /* Start watching for new records, if desired. */ - if (pstore_update_ms >= 0) { - pstore_timer.expires = jiffies + - msecs_to_jiffies(pstore_update_ms); - add_timer(&pstore_timer); - } + pstore_timer_kick(); /* * Update the module parameter backend, so it is visible * through /sys/module/pstore/parameters/backend */ - backend = psi->name; + backend = kstrdup(psi->name, GFP_KERNEL); pr_info("Registered %s as persistent store backend\n", psi->name); - module_put(owner); - + mutex_unlock(&psinfo_lock); return 0; } EXPORT_SYMBOL_GPL(pstore_register); void pstore_unregister(struct pstore_info *psi) { - /* Stop timer and make sure all work has finished. */ - pstore_update_ms = -1; - del_timer_sync(&pstore_timer); - flush_work(&pstore_work); + /* It's okay to unregister nothing. */ + if (!psi) + return; + + mutex_lock(&psinfo_lock); + + /* Only one backend can be registered at a time. */ + if (WARN_ON(psi != psinfo)) { + mutex_unlock(&psinfo_lock); + return; + } + /* Unregister all callbacks. */ if (psi->flags & PSTORE_FLAGS_PMSG) pstore_unregister_pmsg(); if (psi->flags & PSTORE_FLAGS_FTRACE) @@ -648,10 +646,19 @@ void pstore_unregister(struct pstore_info *psi) if (psi->flags & PSTORE_FLAGS_DMESG) pstore_unregister_kmsg(); + /* Stop timer and make sure all work has finished. */ + del_timer_sync(&pstore_timer); + flush_work(&pstore_work); + + /* Remove all backend records from filesystem tree. 
*/ + pstore_put_backend_records(psi); + free_buf_for_compression(); psinfo = NULL; + kfree(backend); backend = NULL; + mutex_unlock(&psinfo_lock); } EXPORT_SYMBOL_GPL(pstore_unregister); @@ -788,9 +795,7 @@ static void pstore_timefunc(struct timer_list *unused) schedule_work(&pstore_work); } - if (pstore_update_ms >= 0) - mod_timer(&pstore_timer, - jiffies + msecs_to_jiffies(pstore_update_ms)); + pstore_timer_kick(); } static void __init pstore_choose_compression(void) @@ -835,11 +840,5 @@ static void __exit pstore_exit(void) } module_exit(pstore_exit) -module_param(compress, charp, 0444); -MODULE_PARM_DESC(compress, "Pstore compression to use"); - -module_param(backend, charp, 0444); -MODULE_PARM_DESC(backend, "Pstore backend to use"); - MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>"); MODULE_LICENSE("GPL"); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 795622190c01..ca6d8a867285 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -21,6 +21,7 @@ #include <linux/pstore_ram.h> #include <linux/of.h> #include <linux/of_address.h> +#include "internal.h" #define RAMOOPS_KERNMSG_HDR "====" #define MIN_MEM_SIZE 4096UL @@ -53,22 +54,27 @@ MODULE_PARM_DESC(mem_size, "size of reserved RAM used to store oops/panic logs"); static unsigned int mem_type; -module_param(mem_type, uint, 0600); +module_param(mem_type, uint, 0400); MODULE_PARM_DESC(mem_type, "set to 1 to try to use unbuffered memory (default 0)"); -static int dump_oops = 1; -module_param(dump_oops, int, 0600); -MODULE_PARM_DESC(dump_oops, - "set to 1 to dump oopses, 0 to only dump panics (default 1)"); +static int ramoops_max_reason = -1; +module_param_named(max_reason, ramoops_max_reason, int, 0400); +MODULE_PARM_DESC(max_reason, + "maximum reason for kmsg dump (default 2: Oops and Panic) "); static int ramoops_ecc; -module_param_named(ecc, ramoops_ecc, int, 0600); +module_param_named(ecc, ramoops_ecc, int, 0400); MODULE_PARM_DESC(ramoops_ecc, "if non-zero, the option enables ECC support and specifies " "ECC buffer size in bytes (1 is a special value, means 16 " "bytes ECC)"); +static int ramoops_dump_oops = -1; +module_param_named(dump_oops, ramoops_dump_oops, int, 0400); +MODULE_PARM_DESC(dump_oops, + "(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics"); + struct ramoops_context { struct persistent_ram_zone **dprzs; /* Oops dump zones */ struct persistent_ram_zone *cprz; /* Console zone */ @@ -81,7 +87,6 @@ struct ramoops_context { size_t console_size; size_t ftrace_size; size_t pmsg_size; - int dump_oops; u32 flags; struct persistent_ram_ecc_info ecc_info; unsigned int max_dump_cnt; @@ -168,58 +173,6 @@ static bool prz_ok(struct persistent_ram_zone *prz) persistent_ram_ecc_string(prz, NULL, 0)); } -static ssize_t ftrace_log_combine(struct persistent_ram_zone *dest, - struct persistent_ram_zone *src) -{ - size_t dest_size, src_size, total, dest_off, src_off; - size_t dest_idx = 0, src_idx = 0, merged_idx = 0; - void *merged_buf; - struct pstore_ftrace_record *drec, *srec, *mrec; - size_t record_size = sizeof(struct pstore_ftrace_record); - - dest_off = dest->old_log_size % record_size; - dest_size = dest->old_log_size - dest_off; - - src_off = src->old_log_size % record_size; - src_size = src->old_log_size - src_off; - - total = dest_size + src_size; - merged_buf = kmalloc(total, GFP_KERNEL); - if (!merged_buf) - return -ENOMEM; - - drec = (struct pstore_ftrace_record *)(dest->old_log + dest_off); - srec = (struct pstore_ftrace_record *)(src->old_log + src_off); - mrec = (struct 
pstore_ftrace_record *)(merged_buf); - - while (dest_size > 0 && src_size > 0) { - if (pstore_ftrace_read_timestamp(&drec[dest_idx]) < - pstore_ftrace_read_timestamp(&srec[src_idx])) { - mrec[merged_idx++] = drec[dest_idx++]; - dest_size -= record_size; - } else { - mrec[merged_idx++] = srec[src_idx++]; - src_size -= record_size; - } - } - - while (dest_size > 0) { - mrec[merged_idx++] = drec[dest_idx++]; - dest_size -= record_size; - } - - while (src_size > 0) { - mrec[merged_idx++] = srec[src_idx++]; - src_size -= record_size; - } - - kfree(dest->old_log); - dest->old_log = merged_buf; - dest->old_log_size = total; - - return 0; -} - static ssize_t ramoops_pstore_read(struct pstore_record *record) { ssize_t size = 0; @@ -291,7 +244,12 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record) tmp_prz->corrected_bytes += prz_next->corrected_bytes; tmp_prz->bad_blocks += prz_next->bad_blocks; - size = ftrace_log_combine(tmp_prz, prz_next); + + size = pstore_ftrace_combine_log( + &tmp_prz->old_log, + &tmp_prz->old_log_size, + prz_next->old_log, + prz_next->old_log_size); if (size) goto out; } @@ -382,16 +340,14 @@ static int notrace ramoops_pstore_write(struct pstore_record *record) return -EINVAL; /* - * Out of the various dmesg dump types, ramoops is currently designed - * to only store crash logs, rather than storing general kernel logs. + * We could filter on record->reason here if we wanted to (which + * would duplicate what happened before the "max_reason" setting + * was added), but that would defeat the purpose of a system + * changing printk.always_kmsg_dump, so instead log everything that + * the kmsg dumper sends us, since it should be doing the filtering + * based on the combination of printk.always_kmsg_dump and our + * requested "max_reason". */ - if (record->reason != KMSG_DUMP_OOPS && - record->reason != KMSG_DUMP_PANIC) - return -EINVAL; - - /* Skip Oopes when configured to do so. */ - if (record->reason == KMSG_DUMP_OOPS && !cxt->dump_oops) - return -EINVAL; /* * Explicitly only take the first part of any new crash. @@ -644,19 +600,25 @@ static int ramoops_init_prz(const char *name, return 0; } -static int ramoops_parse_dt_size(struct platform_device *pdev, - const char *propname, u32 *value) +/* Read a u32 from a dt property and make sure it's safe for an int. */ +static int ramoops_parse_dt_u32(struct platform_device *pdev, + const char *propname, + u32 default_value, u32 *value) { u32 val32 = 0; int ret; ret = of_property_read_u32(pdev->dev.of_node, propname, &val32); - if (ret < 0 && ret != -EINVAL) { + if (ret == -EINVAL) { + /* field is missing, use default value. */ + val32 = default_value; + } else if (ret < 0) { dev_err(&pdev->dev, "failed to parse property %s: %d\n", propname, ret); return ret; } + /* Sanity check our results. */ if (val32 > INT_MAX) { dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32); return -EOVERFLOW; @@ -687,23 +649,32 @@ static int ramoops_parse_dt(struct platform_device *pdev, pdata->mem_size = resource_size(res); pdata->mem_address = res->start; pdata->mem_type = of_property_read_bool(of_node, "unbuffered"); - pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops"); - -#define parse_size(name, field) { \ - ret = ramoops_parse_dt_size(pdev, name, &value); \ + /* + * Setting "no-dump-oops" is deprecated and will be ignored if + * "max_reason" is also specified. 
+ */ + if (of_property_read_bool(of_node, "no-dump-oops")) + pdata->max_reason = KMSG_DUMP_PANIC; + else + pdata->max_reason = KMSG_DUMP_OOPS; + +#define parse_u32(name, field, default_value) { \ + ret = ramoops_parse_dt_u32(pdev, name, default_value, \ + &value); \ if (ret < 0) \ return ret; \ field = value; \ } - parse_size("record-size", pdata->record_size); - parse_size("console-size", pdata->console_size); - parse_size("ftrace-size", pdata->ftrace_size); - parse_size("pmsg-size", pdata->pmsg_size); - parse_size("ecc-size", pdata->ecc_info.ecc_size); - parse_size("flags", pdata->flags); + parse_u32("record-size", pdata->record_size, 0); + parse_u32("console-size", pdata->console_size, 0); + parse_u32("ftrace-size", pdata->ftrace_size, 0); + parse_u32("pmsg-size", pdata->pmsg_size, 0); + parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0); + parse_u32("flags", pdata->flags, 0); + parse_u32("max-reason", pdata->max_reason, pdata->max_reason); -#undef parse_size +#undef parse_u32 /* * Some old Chromebooks relied on the kernel setting the @@ -785,7 +756,6 @@ static int ramoops_probe(struct platform_device *pdev) cxt->console_size = pdata->console_size; cxt->ftrace_size = pdata->ftrace_size; cxt->pmsg_size = pdata->pmsg_size; - cxt->dump_oops = pdata->dump_oops; cxt->flags = pdata->flags; cxt->ecc_info = pdata->ecc_info; @@ -828,8 +798,10 @@ static int ramoops_probe(struct platform_device *pdev) * the single region size is how to check. */ cxt->pstore.flags = 0; - if (cxt->max_dump_cnt) + if (cxt->max_dump_cnt) { cxt->pstore.flags |= PSTORE_FLAGS_DMESG; + cxt->pstore.max_reason = pdata->max_reason; + } if (cxt->console_size) cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE; if (cxt->max_ftrace_cnt) @@ -865,7 +837,7 @@ static int ramoops_probe(struct platform_device *pdev) mem_size = pdata->mem_size; mem_address = pdata->mem_address; record_size = pdata->record_size; - dump_oops = pdata->dump_oops; + ramoops_max_reason = pdata->max_reason; ramoops_console_size = pdata->console_size; ramoops_pmsg_size = pdata->pmsg_size; ramoops_ftrace_size = pdata->ftrace_size; @@ -948,7 +920,16 @@ static void __init ramoops_register_dummy(void) pdata.console_size = ramoops_console_size; pdata.ftrace_size = ramoops_ftrace_size; pdata.pmsg_size = ramoops_pmsg_size; - pdata.dump_oops = dump_oops; + /* If "max_reason" is set, its value has priority over "dump_oops". */ + if (ramoops_max_reason >= 0) + pdata.max_reason = ramoops_max_reason; + /* Otherwise, if "dump_oops" is set, parse it into "max_reason". */ + else if (ramoops_dump_oops != -1) + pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS + : KMSG_DUMP_PANIC; + /* And if neither are explicitly set, use the default. */ + else + pdata.max_reason = KMSG_DUMP_OOPS; pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU; /* diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index c917c191e78c..aa8e0b65ff1a 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -283,7 +283,7 @@ static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz, const void __user *s, unsigned int start, unsigned int count) { struct persistent_ram_buffer *buffer = prz->buffer; - int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ? + int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ? 
-EFAULT : 0; persistent_ram_update_ecc(prz, start, count); return ret; @@ -348,8 +348,6 @@ int notrace persistent_ram_write_user(struct persistent_ram_zone *prz, int rem, ret = 0, c = count; size_t start; - if (unlikely(!access_ok(s, count))) - return -EFAULT; if (unlikely(c > prz->buffer_size)) { s += c - prz->buffer_size; c = prz->buffer_size; diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c new file mode 100644 index 000000000000..819428dfa32f --- /dev/null +++ b/fs/pstore/zone.c @@ -0,0 +1,1465 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Provide a pstore intermediate backend, organized into kernel memory + * allocated zones that are then mapped and flushed into a single + * contiguous region on a storage backend of some kind (block, mtd, etc). + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mount.h> +#include <linux/printk.h> +#include <linux/fs.h> +#include <linux/pstore_zone.h> +#include <linux/kdev_t.h> +#include <linux/device.h> +#include <linux/namei.h> +#include <linux/fcntl.h> +#include <linux/uio.h> +#include <linux/writeback.h> +#include "internal.h" + +/** + * struct psz_head - header of zone to flush to storage + * + * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value) + * @datalen: length of data in @data + * @start: offset into @data where the beginning of the stored bytes begin + * @data: zone data. + */ +struct psz_buffer { +#define PSZ_SIG (0x43474244) /* DBGC */ + uint32_t sig; + atomic_t datalen; + atomic_t start; + uint8_t data[]; +}; + +/** + * struct psz_kmsg_header - kmsg dump-specific header to flush to storage + * + * @magic: magic num for kmsg dump header + * @time: kmsg dump trigger time + * @compressed: whether conpressed + * @counter: kmsg dump counter + * @reason: the kmsg dump reason (e.g. oops, panic, etc) + * @data: pointer to log data + * + * This is a sub-header for a kmsg dump, trailing after &psz_buffer. + */ +struct psz_kmsg_header { +#define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */ + uint32_t magic; + struct timespec64 time; + bool compressed; + uint32_t counter; + enum kmsg_dump_reason reason; + uint8_t data[]; +}; + +/** + * struct pstore_zone - single stored buffer + * + * @off: zone offset of storage + * @type: front-end type for this zone + * @name: front-end name for this zone + * @buffer: pointer to data buffer managed by this zone + * @oldbuf: pointer to old data buffer + * @buffer_size: bytes in @buffer->data + * @should_recover: whether this zone should recover from storage + * @dirty: whether the data in @buffer dirty + * + * zone structure in memory. 
+ */ +struct pstore_zone { + loff_t off; + const char *name; + enum pstore_type_id type; + + struct psz_buffer *buffer; + struct psz_buffer *oldbuf; + size_t buffer_size; + bool should_recover; + atomic_t dirty; +}; + +/** + * struct psz_context - all about running state of pstore/zone + * + * @kpszs: kmsg dump storage zones + * @ppsz: pmsg storage zone + * @cpsz: console storage zone + * @fpszs: ftrace storage zones + * @kmsg_max_cnt: max count of @kpszs + * @kmsg_read_cnt: counter of total read kmsg dumps + * @kmsg_write_cnt: counter of total kmsg dump writes + * @pmsg_read_cnt: counter of total read pmsg zone + * @console_read_cnt: counter of total read console zone + * @ftrace_max_cnt: max count of @fpszs + * @ftrace_read_cnt: counter of max read ftrace zone + * @oops_counter: counter of oops dumps + * @panic_counter: counter of panic dumps + * @recovered: whether finished recovering data from storage + * @on_panic: whether panic is happening + * @pstore_zone_info_lock: lock to @pstore_zone_info + * @pstore_zone_info: information from backend + * @pstore: structure for pstore + */ +struct psz_context { + struct pstore_zone **kpszs; + struct pstore_zone *ppsz; + struct pstore_zone *cpsz; + struct pstore_zone **fpszs; + unsigned int kmsg_max_cnt; + unsigned int kmsg_read_cnt; + unsigned int kmsg_write_cnt; + unsigned int pmsg_read_cnt; + unsigned int console_read_cnt; + unsigned int ftrace_max_cnt; + unsigned int ftrace_read_cnt; + /* + * These counters should be calculated during recovery. + * It records the oops/panic times after crashes rather than boots. + */ + unsigned int oops_counter; + unsigned int panic_counter; + atomic_t recovered; + atomic_t on_panic; + + /* + * pstore_zone_info_lock protects this entire structure during calls + * to register_pstore_zone()/unregister_pstore_zone(). 
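The lock documented above enforces a simple rule: only one storage backend may be registered with pstore/zone at a time. A minimal userspace sketch of that rule, using pthreads and invented names in place of the real register_pstore_zone() plumbing:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;
    static const char *registered;   /* plays the role of cxt->pstore_zone_info */

    static int register_backend(const char *name)
    {
            int err = 0;

            pthread_mutex_lock(&info_lock);
            if (registered) {
                    fprintf(stderr, "'%s' already loaded: ignoring '%s'\n",
                            registered, name);
                    err = -EBUSY;
            } else {
                    registered = name;
            }
            pthread_mutex_unlock(&info_lock);
            return err;
    }

    int main(void)
    {
            register_backend("pstore/blk");
            register_backend("some-other-backend");   /* rejected with -EBUSY */
            return 0;
    }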
+ */ + struct mutex pstore_zone_info_lock; + struct pstore_zone_info *pstore_zone_info; + struct pstore_info pstore; +}; +static struct psz_context pstore_zone_cxt; + +static void psz_flush_all_dirty_zones(struct work_struct *); +static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones); + +/** + * enum psz_flush_mode - flush mode for psz_zone_write() + * + * @FLUSH_NONE: do not flush to storage but update data on memory + * @FLUSH_PART: just flush part of data including meta data to storage + * @FLUSH_META: just flush meta data of zone to storage + * @FLUSH_ALL: flush all of zone + */ +enum psz_flush_mode { + FLUSH_NONE = 0, + FLUSH_PART, + FLUSH_META, + FLUSH_ALL, +}; + +static inline int buffer_datalen(struct pstore_zone *zone) +{ + return atomic_read(&zone->buffer->datalen); +} + +static inline int buffer_start(struct pstore_zone *zone) +{ + return atomic_read(&zone->buffer->start); +} + +static inline bool is_on_panic(void) +{ + return atomic_read(&pstore_zone_cxt.on_panic); +} + +static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf, + size_t len, unsigned long off) +{ + if (!buf || !zone || !zone->buffer) + return -EINVAL; + if (off > zone->buffer_size) + return -EINVAL; + len = min_t(size_t, len, zone->buffer_size - off); + memcpy(buf, zone->buffer->data + off, len); + return len; +} + +static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf, + size_t len, unsigned long off) +{ + if (!buf || !zone || !zone->oldbuf) + return -EINVAL; + if (off > zone->buffer_size) + return -EINVAL; + len = min_t(size_t, len, zone->buffer_size - off); + memcpy(buf, zone->oldbuf->data + off, len); + return 0; +} + +static int psz_zone_write(struct pstore_zone *zone, + enum psz_flush_mode flush_mode, const char *buf, + size_t len, unsigned long off) +{ + struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info; + ssize_t wcnt = 0; + ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos); + size_t wlen; + + if (off > zone->buffer_size) + return -EINVAL; + + wlen = min_t(size_t, len, zone->buffer_size - off); + if (buf && wlen) { + memcpy(zone->buffer->data + off, buf, wlen); + atomic_set(&zone->buffer->datalen, wlen + off); + } + + /* avoid to damage old records */ + if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered)) + goto dirty; + + writeop = is_on_panic() ? 
info->panic_write : info->write; + if (!writeop) + goto dirty; + + switch (flush_mode) { + case FLUSH_NONE: + if (unlikely(buf && wlen)) + goto dirty; + return 0; + case FLUSH_PART: + wcnt = writeop((const char *)zone->buffer->data + off, wlen, + zone->off + sizeof(*zone->buffer) + off); + if (wcnt != wlen) + goto dirty; + fallthrough; + case FLUSH_META: + wlen = sizeof(struct psz_buffer); + wcnt = writeop((const char *)zone->buffer, wlen, zone->off); + if (wcnt != wlen) + goto dirty; + break; + case FLUSH_ALL: + wlen = zone->buffer_size + sizeof(*zone->buffer); + wcnt = writeop((const char *)zone->buffer, wlen, zone->off); + if (wcnt != wlen) + goto dirty; + break; + } + + return 0; +dirty: + /* no need to mark dirty if going to try next zone */ + if (wcnt == -ENOMSG) + return -ENOMSG; + atomic_set(&zone->dirty, true); + /* flush dirty zones nicely */ + if (wcnt == -EBUSY && !is_on_panic()) + schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500)); + return -EBUSY; +} + +static int psz_flush_dirty_zone(struct pstore_zone *zone) +{ + int ret; + + if (unlikely(!zone)) + return -EINVAL; + + if (unlikely(!atomic_read(&pstore_zone_cxt.recovered))) + return -EBUSY; + + if (!atomic_xchg(&zone->dirty, false)) + return 0; + + ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0); + if (ret) + atomic_set(&zone->dirty, true); + return ret; +} + +static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt) +{ + int i, ret; + struct pstore_zone *zone; + + if (!zones) + return -EINVAL; + + for (i = 0; i < cnt; i++) { + zone = zones[i]; + if (!zone) + return -EINVAL; + ret = psz_flush_dirty_zone(zone); + if (ret) + return ret; + } + return 0; +} + +static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new) +{ + const char *data = (const char *)old->buffer->data; + int ret; + + ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0); + if (ret) { + atomic_set(&new->buffer->datalen, 0); + atomic_set(&new->dirty, false); + return ret; + } + atomic_set(&old->buffer->datalen, 0); + return 0; +} + +static void psz_flush_all_dirty_zones(struct work_struct *work) +{ + struct psz_context *cxt = &pstore_zone_cxt; + int ret = 0; + + if (cxt->ppsz) + ret |= psz_flush_dirty_zone(cxt->ppsz); + if (cxt->cpsz) + ret |= psz_flush_dirty_zone(cxt->cpsz); + if (cxt->kpszs) + ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt); + if (cxt->fpszs) + ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt); + if (ret && cxt->pstore_zone_info) + schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000)); +} + +static int psz_kmsg_recover_data(struct psz_context *cxt) +{ + struct pstore_zone_info *info = cxt->pstore_zone_info; + struct pstore_zone *zone = NULL; + struct psz_buffer *buf; + unsigned long i; + ssize_t rcnt; + + if (!info->read) + return -EINVAL; + + for (i = 0; i < cxt->kmsg_max_cnt; i++) { + zone = cxt->kpszs[i]; + if (unlikely(!zone)) + return -EINVAL; + if (atomic_read(&zone->dirty)) { + unsigned int wcnt = cxt->kmsg_write_cnt; + struct pstore_zone *new = cxt->kpszs[wcnt]; + int ret; + + ret = psz_move_zone(zone, new); + if (ret) { + pr_err("move zone from %lu to %d failed\n", + i, wcnt); + return ret; + } + cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt; + } + if (!zone->should_recover) + continue; + buf = zone->buffer; + rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf), + zone->off); + if (rcnt != zone->buffer_size + sizeof(*buf)) + return (int)rcnt < 0 ? 
(int)rcnt : -EIO; + } + return 0; +} + +static int psz_kmsg_recover_meta(struct psz_context *cxt) +{ + struct pstore_zone_info *info = cxt->pstore_zone_info; + struct pstore_zone *zone; + size_t rcnt, len; + struct psz_buffer *buf; + struct psz_kmsg_header *hdr; + struct timespec64 time = { }; + unsigned long i; + /* + * Recover may on panic, we can't allocate any memory by kmalloc. + * So, we use local array instead. + */ + char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0}; + + if (!info->read) + return -EINVAL; + + len = sizeof(*buf) + sizeof(*hdr); + buf = (struct psz_buffer *)buffer_header; + for (i = 0; i < cxt->kmsg_max_cnt; i++) { + zone = cxt->kpszs[i]; + if (unlikely(!zone)) + return -EINVAL; + + rcnt = info->read((char *)buf, len, zone->off); + if (rcnt == -ENOMSG) { + pr_debug("%s with id %lu may be broken, skip\n", + zone->name, i); + continue; + } else if (rcnt != len) { + pr_err("read %s with id %lu failed\n", zone->name, i); + return (int)rcnt < 0 ? (int)rcnt : -EIO; + } + + if (buf->sig != zone->buffer->sig) { + pr_debug("no valid data in kmsg dump zone %lu\n", i); + continue; + } + + if (zone->buffer_size < atomic_read(&buf->datalen)) { + pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n", + zone->name, i, zone->off, + zone->buffer_size); + continue; + } + + hdr = (struct psz_kmsg_header *)buf->data; + if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) { + pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n", + zone->name, i, zone->off, + zone->buffer_size); + continue; + } + + /* + * we get the newest zone, and the next one must be the oldest + * or unused zone, because we do write one by one like a circle. + */ + if (hdr->time.tv_sec >= time.tv_sec) { + time.tv_sec = hdr->time.tv_sec; + cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt; + } + + if (hdr->reason == KMSG_DUMP_OOPS) + cxt->oops_counter = + max(cxt->oops_counter, hdr->counter); + else if (hdr->reason == KMSG_DUMP_PANIC) + cxt->panic_counter = + max(cxt->panic_counter, hdr->counter); + + if (!atomic_read(&buf->datalen)) { + pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n", + zone->name, i, zone->off, + zone->buffer_size, + atomic_read(&buf->datalen)); + continue; + } + + if (!is_on_panic()) + zone->should_recover = true; + pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n", + zone->name, i, zone->off, + zone->buffer_size, atomic_read(&buf->datalen)); + } + + return 0; +} + +static int psz_kmsg_recover(struct psz_context *cxt) +{ + int ret; + + if (!cxt->kpszs) + return 0; + + ret = psz_kmsg_recover_meta(cxt); + if (ret) + goto recover_fail; + + ret = psz_kmsg_recover_data(cxt); + if (ret) + goto recover_fail; + + return 0; +recover_fail: + pr_debug("psz_recover_kmsg failed\n"); + return ret; +} + +static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone) +{ + struct pstore_zone_info *info = cxt->pstore_zone_info; + struct psz_buffer *oldbuf, tmpbuf; + int ret = 0; + char *buf; + ssize_t rcnt, len, start, off; + + if (!zone || zone->oldbuf) + return 0; + + if (is_on_panic()) { + /* save data as much as possible */ + psz_flush_dirty_zone(zone); + return 0; + } + + if (unlikely(!info->read)) + return -EINVAL; + + len = sizeof(struct psz_buffer); + rcnt = info->read((char *)&tmpbuf, len, zone->off); + if (rcnt != len) { + pr_debug("read zone %s failed\n", zone->name); + return (int)rcnt < 0 ? 
(int)rcnt : -EIO; + } + + if (tmpbuf.sig != zone->buffer->sig) { + pr_debug("no valid data in zone %s\n", zone->name); + return 0; + } + + if (zone->buffer_size < atomic_read(&tmpbuf.datalen) || + zone->buffer_size < atomic_read(&tmpbuf.start)) { + pr_info("found overtop zone: %s: off %lld, size %zu\n", + zone->name, zone->off, zone->buffer_size); + /* just keep going */ + return 0; + } + + if (!atomic_read(&tmpbuf.datalen)) { + pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n", + zone->name, zone->off, zone->buffer_size, + atomic_read(&tmpbuf.datalen)); + return 0; + } + + pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n", + zone->name, zone->off, zone->buffer_size, + atomic_read(&tmpbuf.datalen)); + + len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf); + oldbuf = kzalloc(len, GFP_KERNEL); + if (!oldbuf) + return -ENOMEM; + + memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf)); + buf = (char *)oldbuf + sizeof(*oldbuf); + len = atomic_read(&oldbuf->datalen); + start = atomic_read(&oldbuf->start); + off = zone->off + sizeof(*oldbuf); + + /* get part of data */ + rcnt = info->read(buf, len - start, off + start); + if (rcnt != len - start) { + pr_err("read zone %s failed\n", zone->name); + ret = (int)rcnt < 0 ? (int)rcnt : -EIO; + goto free_oldbuf; + } + + /* get the rest of data */ + rcnt = info->read(buf + len - start, start, off); + if (rcnt != start) { + pr_err("read zone %s failed\n", zone->name); + ret = (int)rcnt < 0 ? (int)rcnt : -EIO; + goto free_oldbuf; + } + + zone->oldbuf = oldbuf; + psz_flush_dirty_zone(zone); + return 0; + +free_oldbuf: + kfree(oldbuf); + return ret; +} + +static int psz_recover_zones(struct psz_context *cxt, + struct pstore_zone **zones, unsigned int cnt) +{ + int ret; + unsigned int i; + struct pstore_zone *zone; + + if (!zones) + return 0; + + for (i = 0; i < cnt; i++) { + zone = zones[i]; + if (unlikely(!zone)) + continue; + ret = psz_recover_zone(cxt, zone); + if (ret) + goto recover_fail; + } + + return 0; +recover_fail: + pr_debug("recover %s[%u] failed\n", zone->name, i); + return ret; +} + +/** + * psz_recovery() - recover data from storage + * @cxt: the context of pstore/zone + * + * recovery means reading data back from storage after rebooting + * + * Return: 0 on success, others on failure. 
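psz_recover_zone() above rebuilds an old record with two reads because the stored buffer is circular: the bytes from 'start' to the end of the buffer are logically first, and the bytes that wrapped around to the front come last. A self-contained model of that reassembly (plain C with invented names, not the kernel helpers):

    #include <stdio.h>
    #include <string.h>

    /* Copy a wrapped ring of 'len' bytes into linear order, given that the
     * oldest byte lives at index 'start'. */
    static void linearize(const char *ring, size_t len, size_t start, char *out)
    {
            memcpy(out, ring + start, len - start);   /* oldest part: [start, len) */
            memcpy(out + len - start, ring, start);   /* wrapped part: [0, start)  */
    }

    int main(void)
    {
            /* The writer wrapped after 'F', so the newest bytes 'GHIJ' sit at
             * the front of the ring. */
            char ring[10] = { 'G', 'H', 'I', 'J', 'A', 'B', 'C', 'D', 'E', 'F' };
            char out[11] = { 0 };

            linearize(ring, sizeof(ring), 4, out);
            puts(out);                                /* prints ABCDEFGHIJ */
            return 0;
    }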
+ */ +static inline int psz_recovery(struct psz_context *cxt) +{ + int ret; + + if (atomic_read(&cxt->recovered)) + return 0; + + ret = psz_kmsg_recover(cxt); + if (ret) + goto out; + + ret = psz_recover_zone(cxt, cxt->ppsz); + if (ret) + goto out; + + ret = psz_recover_zone(cxt, cxt->cpsz); + if (ret) + goto out; + + ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt); + +out: + if (unlikely(ret)) + pr_err("recover failed\n"); + else { + pr_debug("recover end!\n"); + atomic_set(&cxt->recovered, 1); + } + return ret; +} + +static int psz_pstore_open(struct pstore_info *psi) +{ + struct psz_context *cxt = psi->data; + + cxt->kmsg_read_cnt = 0; + cxt->pmsg_read_cnt = 0; + cxt->console_read_cnt = 0; + cxt->ftrace_read_cnt = 0; + return 0; +} + +static inline bool psz_old_ok(struct pstore_zone *zone) +{ + if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen)) + return true; + return false; +} + +static inline bool psz_ok(struct pstore_zone *zone) +{ + if (zone && zone->buffer && buffer_datalen(zone)) + return true; + return false; +} + +static inline int psz_kmsg_erase(struct psz_context *cxt, + struct pstore_zone *zone, struct pstore_record *record) +{ + struct psz_buffer *buffer = zone->buffer; + struct psz_kmsg_header *hdr = + (struct psz_kmsg_header *)buffer->data; + size_t size; + + if (unlikely(!psz_ok(zone))) + return 0; + + /* this zone is already updated, no need to erase */ + if (record->count != hdr->counter) + return 0; + + size = buffer_datalen(zone) + sizeof(*zone->buffer); + atomic_set(&zone->buffer->datalen, 0); + if (cxt->pstore_zone_info->erase) + return cxt->pstore_zone_info->erase(size, zone->off); + else + return psz_zone_write(zone, FLUSH_META, NULL, 0, 0); +} + +static inline int psz_record_erase(struct psz_context *cxt, + struct pstore_zone *zone) +{ + if (unlikely(!psz_old_ok(zone))) + return 0; + + kfree(zone->oldbuf); + zone->oldbuf = NULL; + /* + * if there are new data in zone buffer, that means the old data + * are already invalid. It is no need to flush 0 (erase) to + * block device. 
+ */ + if (!buffer_datalen(zone)) + return psz_zone_write(zone, FLUSH_META, NULL, 0, 0); + psz_flush_dirty_zone(zone); + return 0; +} + +static int psz_pstore_erase(struct pstore_record *record) +{ + struct psz_context *cxt = record->psi->data; + + switch (record->type) { + case PSTORE_TYPE_DMESG: + if (record->id >= cxt->kmsg_max_cnt) + return -EINVAL; + return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record); + case PSTORE_TYPE_PMSG: + return psz_record_erase(cxt, cxt->ppsz); + case PSTORE_TYPE_CONSOLE: + return psz_record_erase(cxt, cxt->cpsz); + case PSTORE_TYPE_FTRACE: + if (record->id >= cxt->ftrace_max_cnt) + return -EINVAL; + return psz_record_erase(cxt, cxt->fpszs[record->id]); + default: return -EINVAL; + } +} + +static void psz_write_kmsg_hdr(struct pstore_zone *zone, + struct pstore_record *record) +{ + struct psz_context *cxt = record->psi->data; + struct psz_buffer *buffer = zone->buffer; + struct psz_kmsg_header *hdr = + (struct psz_kmsg_header *)buffer->data; + + hdr->magic = PSTORE_KMSG_HEADER_MAGIC; + hdr->compressed = record->compressed; + hdr->time.tv_sec = record->time.tv_sec; + hdr->time.tv_nsec = record->time.tv_nsec; + hdr->reason = record->reason; + if (hdr->reason == KMSG_DUMP_OOPS) + hdr->counter = ++cxt->oops_counter; + else if (hdr->reason == KMSG_DUMP_PANIC) + hdr->counter = ++cxt->panic_counter; + else + hdr->counter = 0; +} + +/* + * In case zone is broken, which may occur to MTD device, we try each zones, + * start at cxt->kmsg_write_cnt. + */ +static inline int notrace psz_kmsg_write_record(struct psz_context *cxt, + struct pstore_record *record) +{ + size_t size, hlen; + struct pstore_zone *zone; + unsigned int i; + + for (i = 0; i < cxt->kmsg_max_cnt; i++) { + unsigned int zonenum, len; + int ret; + + zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt; + zone = cxt->kpszs[zonenum]; + if (unlikely(!zone)) + return -ENOSPC; + + /* avoid destroying old data, allocate a new one */ + len = zone->buffer_size + sizeof(*zone->buffer); + zone->oldbuf = zone->buffer; + zone->buffer = kzalloc(len, GFP_KERNEL); + if (!zone->buffer) { + zone->buffer = zone->oldbuf; + return -ENOMEM; + } + zone->buffer->sig = zone->oldbuf->sig; + + pr_debug("write %s to zone id %d\n", zone->name, zonenum); + psz_write_kmsg_hdr(zone, record); + hlen = sizeof(struct psz_kmsg_header); + size = min_t(size_t, record->size, zone->buffer_size - hlen); + ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen); + if (likely(!ret || ret != -ENOMSG)) { + cxt->kmsg_write_cnt = zonenum + 1; + cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt; + /* no need to try next zone, free last zone buffer */ + kfree(zone->oldbuf); + zone->oldbuf = NULL; + return ret; + } + + pr_debug("zone %u may be broken, try next dmesg zone\n", + zonenum); + kfree(zone->buffer); + zone->buffer = zone->oldbuf; + zone->oldbuf = NULL; + } + + return -EBUSY; +} + +static int notrace psz_kmsg_write(struct psz_context *cxt, + struct pstore_record *record) +{ + int ret; + + /* + * Explicitly only take the first part of any new crash. + * If our buffer is larger than kmsg_bytes, this can never happen, + * and if our buffer is smaller than kmsg_bytes, we don't want the + * report split across multiple records. 
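psz_kmsg_write_record() above rotates through the dump zones, starting at the current write index and moving on to the next zone when the current one turns out to be unwritable. A small standalone model of that selection; zone health is faked with an array and all names are invented:

    #include <stdio.h>

    #define MAX_ZONES 8

    static int pick_zone(unsigned int write_cnt, const int broken[MAX_ZONES],
                         unsigned int *next_write_cnt)
    {
            unsigned int i;

            for (i = 0; i < MAX_ZONES; i++) {
                    unsigned int z = (write_cnt + i) % MAX_ZONES;

                    if (broken[z])
                            continue;             /* try the next dmesg zone */
                    *next_write_cnt = (z + 1) % MAX_ZONES;
                    return z;
            }
            return -1;                            /* every zone failed */
    }

    int main(void)
    {
            int broken[MAX_ZONES] = { 0, 0, 0, 1, 1, 0, 0, 0 };
            unsigned int next = 0;
            int z = pick_zone(3, broken, &next);

            printf("wrote zone %d, next write starts at zone %u\n", z, next);
            return 0;
    }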
+ */ + if (record->part != 1) + return -ENOSPC; + + if (!cxt->kpszs) + return -ENOSPC; + + ret = psz_kmsg_write_record(cxt, record); + if (!ret && is_on_panic()) { + /* ensure all data are flushed to storage when panic */ + pr_debug("try to flush other dirty zones\n"); + psz_flush_all_dirty_zones(NULL); + } + + /* always return 0 as we had handled it on buffer */ + return 0; +} + +static int notrace psz_record_write(struct pstore_zone *zone, + struct pstore_record *record) +{ + size_t start, rem; + bool is_full_data = false; + char *buf; + int cnt; + + if (!zone || !record) + return -ENOSPC; + + if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size) + is_full_data = true; + + cnt = record->size; + buf = record->buf; + if (unlikely(cnt > zone->buffer_size)) { + buf += cnt - zone->buffer_size; + cnt = zone->buffer_size; + } + + start = buffer_start(zone); + rem = zone->buffer_size - start; + if (unlikely(rem < cnt)) { + psz_zone_write(zone, FLUSH_PART, buf, rem, start); + buf += rem; + cnt -= rem; + start = 0; + is_full_data = true; + } + + atomic_set(&zone->buffer->start, cnt + start); + psz_zone_write(zone, FLUSH_PART, buf, cnt, start); + + /** + * psz_zone_write will set datalen as start + cnt. + * It work if actual data length lesser than buffer size. + * If data length greater than buffer size, pmsg will rewrite to + * beginning of zone, which make buffer->datalen wrongly. + * So we should reset datalen as buffer size once actual data length + * greater than buffer size. + */ + if (is_full_data) { + atomic_set(&zone->buffer->datalen, zone->buffer_size); + psz_zone_write(zone, FLUSH_META, NULL, 0, 0); + } + return 0; +} + +static int notrace psz_pstore_write(struct pstore_record *record) +{ + struct psz_context *cxt = record->psi->data; + + if (record->type == PSTORE_TYPE_DMESG && + record->reason == KMSG_DUMP_PANIC) + atomic_set(&cxt->on_panic, 1); + + /* + * if on panic, do not write except panic records + * Fix case that panic_write prints log which wakes up console backend. + */ + if (is_on_panic() && record->type != PSTORE_TYPE_DMESG) + return -EBUSY; + + switch (record->type) { + case PSTORE_TYPE_DMESG: + return psz_kmsg_write(cxt, record); + case PSTORE_TYPE_CONSOLE: + return psz_record_write(cxt->cpsz, record); + case PSTORE_TYPE_PMSG: + return psz_record_write(cxt->ppsz, record); + case PSTORE_TYPE_FTRACE: { + int zonenum = smp_processor_id(); + + if (!cxt->fpszs) + return -ENOSPC; + return psz_record_write(cxt->fpszs[zonenum], record); + } + default: + return -EINVAL; + } +} + +static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt) +{ + struct pstore_zone *zone = NULL; + + while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) { + zone = cxt->kpszs[cxt->kmsg_read_cnt++]; + if (psz_ok(zone)) + return zone; + } + + if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt) + /* + * No need psz_old_ok(). Let psz_ftrace_read() do so for + * combination. psz_ftrace_read() should traverse over + * all zones in case of some zone without data. 
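The bookkeeping in psz_record_write() above is a classic wrap-around write: 'start' chases the newest byte and 'datalen' saturates at the buffer size once the ring has wrapped at least once. A compact userspace model of that behaviour (fixed 16-byte ring, invented names, no storage flush):

    #include <stdio.h>
    #include <string.h>

    struct ring {
            char   data[16];
            size_t start;      /* next byte to write              */
            size_t datalen;    /* valid bytes, capped at the size */
    };

    static void ring_write(struct ring *r, const char *buf, size_t cnt)
    {
            size_t size = sizeof(r->data);
            int full = r->datalen >= size;

            if (cnt > size) {              /* keep only the newest bytes */
                    buf += cnt - size;
                    cnt = size;
            }
            if (size - r->start < cnt) {   /* split across the wrap point */
                    size_t rem = size - r->start;

                    memcpy(r->data + r->start, buf, rem);
                    buf += rem;
                    cnt -= rem;
                    r->start = 0;
                    full = 1;
            }
            memcpy(r->data + r->start, buf, cnt);
            r->start += cnt;
            r->datalen = full ? size : r->start;
    }

    int main(void)
    {
            struct ring r = { .start = 0, .datalen = 0 };

            ring_write(&r, "hello ", 6);
            ring_write(&r, "pstore zone ring", 16);
            printf("start=%zu datalen=%zu\n", r.start, r.datalen);  /* 6 and 16 */
            return 0;
    }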
+ */ + return cxt->fpszs[cxt->ftrace_read_cnt++]; + + if (cxt->pmsg_read_cnt == 0) { + cxt->pmsg_read_cnt++; + zone = cxt->ppsz; + if (psz_old_ok(zone)) + return zone; + } + + if (cxt->console_read_cnt == 0) { + cxt->console_read_cnt++; + zone = cxt->cpsz; + if (psz_old_ok(zone)) + return zone; + } + + return NULL; +} + +static int psz_kmsg_read_hdr(struct pstore_zone *zone, + struct pstore_record *record) +{ + struct psz_buffer *buffer = zone->buffer; + struct psz_kmsg_header *hdr = + (struct psz_kmsg_header *)buffer->data; + + if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) + return -EINVAL; + record->compressed = hdr->compressed; + record->time.tv_sec = hdr->time.tv_sec; + record->time.tv_nsec = hdr->time.tv_nsec; + record->reason = hdr->reason; + record->count = hdr->counter; + return 0; +} + +static ssize_t psz_kmsg_read(struct pstore_zone *zone, + struct pstore_record *record) +{ + ssize_t size, hlen = 0; + + size = buffer_datalen(zone); + /* Clear and skip this kmsg dump record if it has no valid header */ + if (psz_kmsg_read_hdr(zone, record)) { + atomic_set(&zone->buffer->datalen, 0); + atomic_set(&zone->dirty, 0); + return -ENOMSG; + } + size -= sizeof(struct psz_kmsg_header); + + if (!record->compressed) { + char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n", + kmsg_dump_reason_str(record->reason), + record->count); + hlen = strlen(buf); + record->buf = krealloc(buf, hlen + size, GFP_KERNEL); + if (!record->buf) { + kfree(buf); + return -ENOMEM; + } + } else { + record->buf = kmalloc(size, GFP_KERNEL); + if (!record->buf) + return -ENOMEM; + } + + size = psz_zone_read_buffer(zone, record->buf + hlen, size, + sizeof(struct psz_kmsg_header)); + if (unlikely(size < 0)) { + kfree(record->buf); + return -ENOMSG; + } + + return size + hlen; +} + +/* try to combine all ftrace zones */ +static ssize_t psz_ftrace_read(struct pstore_zone *zone, + struct pstore_record *record) +{ + struct psz_context *cxt; + struct psz_buffer *buf; + int ret; + + if (!zone || !record) + return -ENOSPC; + + if (!psz_old_ok(zone)) + goto out; + + buf = (struct psz_buffer *)zone->oldbuf; + if (!buf) + return -ENOMSG; + + ret = pstore_ftrace_combine_log(&record->buf, &record->size, + (char *)buf->data, atomic_read(&buf->datalen)); + if (unlikely(ret)) + return ret; + +out: + cxt = record->psi->data; + if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt) + /* then, read next ftrace zone */ + return -ENOMSG; + record->id = 0; + return record->size ? 
record->size : -ENOMSG; +} + +static ssize_t psz_record_read(struct pstore_zone *zone, + struct pstore_record *record) +{ + size_t len; + struct psz_buffer *buf; + + if (!zone || !record) + return -ENOSPC; + + buf = (struct psz_buffer *)zone->oldbuf; + if (!buf) + return -ENOMSG; + + len = atomic_read(&buf->datalen); + record->buf = kmalloc(len, GFP_KERNEL); + if (!record->buf) + return -ENOMEM; + + if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) { + kfree(record->buf); + return -ENOMSG; + } + + return len; +} + +static ssize_t psz_pstore_read(struct pstore_record *record) +{ + struct psz_context *cxt = record->psi->data; + ssize_t (*readop)(struct pstore_zone *zone, + struct pstore_record *record); + struct pstore_zone *zone; + ssize_t ret; + + /* before read, we must recover from storage */ + ret = psz_recovery(cxt); + if (ret) + return ret; + +next_zone: + zone = psz_read_next_zone(cxt); + if (!zone) + return 0; + + record->type = zone->type; + switch (record->type) { + case PSTORE_TYPE_DMESG: + readop = psz_kmsg_read; + record->id = cxt->kmsg_read_cnt - 1; + break; + case PSTORE_TYPE_FTRACE: + readop = psz_ftrace_read; + break; + case PSTORE_TYPE_CONSOLE: + fallthrough; + case PSTORE_TYPE_PMSG: + readop = psz_record_read; + break; + default: + goto next_zone; + } + + ret = readop(zone, record); + if (ret == -ENOMSG) + goto next_zone; + return ret; +} + +static struct psz_context pstore_zone_cxt = { + .pstore_zone_info_lock = + __MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock), + .recovered = ATOMIC_INIT(0), + .on_panic = ATOMIC_INIT(0), + .pstore = { + .owner = THIS_MODULE, + .open = psz_pstore_open, + .read = psz_pstore_read, + .write = psz_pstore_write, + .erase = psz_pstore_erase, + }, +}; + +static void psz_free_zone(struct pstore_zone **pszone) +{ + struct pstore_zone *zone = *pszone; + + if (!zone) + return; + + kfree(zone->buffer); + kfree(zone); + *pszone = NULL; +} + +static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt) +{ + struct pstore_zone **zones = *pszones; + + if (!zones) + return; + + while (*cnt > 0) { + (*cnt)--; + psz_free_zone(&(zones[*cnt])); + } + kfree(zones); + *pszones = NULL; +} + +static void psz_free_all_zones(struct psz_context *cxt) +{ + if (cxt->kpszs) + psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt); + if (cxt->ppsz) + psz_free_zone(&cxt->ppsz); + if (cxt->cpsz) + psz_free_zone(&cxt->cpsz); + if (cxt->fpszs) + psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt); +} + +static struct pstore_zone *psz_init_zone(enum pstore_type_id type, + loff_t *off, size_t size) +{ + struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info; + struct pstore_zone *zone; + const char *name = pstore_type_to_name(type); + + if (!size) + return NULL; + + if (*off + size > info->total_size) { + pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n", + name, size, *off, info->total_size); + return ERR_PTR(-ENOMEM); + } + + zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL); + if (!zone) + return ERR_PTR(-ENOMEM); + + zone->buffer = kmalloc(size, GFP_KERNEL); + if (!zone->buffer) { + kfree(zone); + return ERR_PTR(-ENOMEM); + } + memset(zone->buffer, 0xFF, size); + zone->off = *off; + zone->name = name; + zone->type = type; + zone->buffer_size = size - sizeof(struct psz_buffer); + zone->buffer->sig = type ^ PSZ_SIG; + zone->oldbuf = NULL; + atomic_set(&zone->dirty, 0); + atomic_set(&zone->buffer->datalen, 0); + atomic_set(&zone->buffer->start, 0); + + *off += size; + + pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", 
zone->name, + zone->off, sizeof(*zone->buffer), zone->buffer_size); + return zone; +} + +static struct pstore_zone **psz_init_zones(enum pstore_type_id type, + loff_t *off, size_t total_size, ssize_t record_size, + unsigned int *cnt) +{ + struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info; + struct pstore_zone **zones, *zone; + const char *name = pstore_type_to_name(type); + int c, i; + + *cnt = 0; + if (!total_size || !record_size) + return NULL; + + if (*off + total_size > info->total_size) { + pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n", + name, total_size, *off, info->total_size); + return ERR_PTR(-ENOMEM); + } + + c = total_size / record_size; + zones = kcalloc(c, sizeof(*zones), GFP_KERNEL); + if (!zones) { + pr_err("allocate for zones %s failed\n", name); + return ERR_PTR(-ENOMEM); + } + memset(zones, 0, c * sizeof(*zones)); + + for (i = 0; i < c; i++) { + zone = psz_init_zone(type, off, record_size); + if (!zone || IS_ERR(zone)) { + pr_err("initialize zones %s failed\n", name); + psz_free_zones(&zones, &i); + return (void *)zone; + } + zones[i] = zone; + } + + *cnt = c; + return zones; +} + +static int psz_alloc_zones(struct psz_context *cxt) +{ + struct pstore_zone_info *info = cxt->pstore_zone_info; + loff_t off = 0; + int err; + size_t off_size = 0; + + off_size += info->pmsg_size; + cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size); + if (IS_ERR(cxt->ppsz)) { + err = PTR_ERR(cxt->ppsz); + cxt->ppsz = NULL; + goto free_out; + } + + off_size += info->console_size; + cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off, + info->console_size); + if (IS_ERR(cxt->cpsz)) { + err = PTR_ERR(cxt->cpsz); + cxt->cpsz = NULL; + goto free_out; + } + + off_size += info->ftrace_size; + cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off, + info->ftrace_size, + info->ftrace_size / nr_cpu_ids, + &cxt->ftrace_max_cnt); + if (IS_ERR(cxt->fpszs)) { + err = PTR_ERR(cxt->fpszs); + cxt->fpszs = NULL; + goto free_out; + } + + cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off, + info->total_size - off_size, + info->kmsg_size, &cxt->kmsg_max_cnt); + if (IS_ERR(cxt->kpszs)) { + err = PTR_ERR(cxt->kpszs); + cxt->kpszs = NULL; + goto free_out; + } + + return 0; +free_out: + psz_free_all_zones(cxt); + return err; +} + +/** + * register_pstore_zone() - register to pstore/zone + * + * @info: back-end driver information. See &struct pstore_zone_info. + * + * Only one back-end at one time. + * + * Return: 0 on success, others on failure. + */ +int register_pstore_zone(struct pstore_zone_info *info) +{ + int err = -EINVAL; + struct psz_context *cxt = &pstore_zone_cxt; + + if (info->total_size < 4096) { + pr_warn("total_size must be >= 4096\n"); + return -EINVAL; + } + + if (!info->kmsg_size && !info->pmsg_size && !info->console_size && + !info->ftrace_size) { + pr_warn("at least one record size must be non-zero\n"); + return -EINVAL; + } + + if (!info->name || !info->name[0]) + return -EINVAL; + +#define check_size(name, size) { \ + if (info->name > 0 && info->name < (size)) { \ + pr_err(#name " must be over %d\n", (size)); \ + return -EINVAL; \ + } \ + if (info->name & (size - 1)) { \ + pr_err(#name " must be a multiple of %d\n", \ + (size)); \ + return -EINVAL; \ + } \ + } + + check_size(total_size, 4096); + check_size(kmsg_size, SECTOR_SIZE); + check_size(pmsg_size, SECTOR_SIZE); + check_size(console_size, SECTOR_SIZE); + check_size(ftrace_size, SECTOR_SIZE); + +#undef check_size + + /* + * the @read and @write must be applied. 
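psz_alloc_zones() above lays the zones out back to back: the pmsg, console and per-CPU ftrace zones are carved from the front of the backing region, and whatever remains is divided into kmsg_size dump zones. Worked numbers for a hypothetical 256 KiB region (all sizes here are made-up examples, not defaults):

    #include <stdio.h>

    int main(void)
    {
            unsigned long total    = 0x40000;   /* 256 KiB backing store         */
            unsigned long pmsg     = 0x8000;
            unsigned long console  = 0x8000;
            unsigned long ftrace   = 0x8000;    /* later split across nr_cpu_ids */
            unsigned long kmsg     = 0x10000;   /* kmsg_size: one dump zone      */
            unsigned long off      = pmsg + console + ftrace;
            unsigned long kmsg_cnt = (total - off) / kmsg;

            printf("%lu kmsg zone(s), first dump zone at offset 0x%lx\n",
                   kmsg_cnt, off);
            return 0;
    }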
+ * if no @read, pstore may mount failed. + * if no @write, pstore do not support to remove record file. + */ + if (!info->read || !info->write) { + pr_err("no valid general read/write interface\n"); + return -EINVAL; + } + + mutex_lock(&cxt->pstore_zone_info_lock); + if (cxt->pstore_zone_info) { + pr_warn("'%s' already loaded: ignoring '%s'\n", + cxt->pstore_zone_info->name, info->name); + mutex_unlock(&cxt->pstore_zone_info_lock); + return -EBUSY; + } + cxt->pstore_zone_info = info; + + pr_debug("register %s with properties:\n", info->name); + pr_debug("\ttotal size : %ld Bytes\n", info->total_size); + pr_debug("\tkmsg size : %ld Bytes\n", info->kmsg_size); + pr_debug("\tpmsg size : %ld Bytes\n", info->pmsg_size); + pr_debug("\tconsole size : %ld Bytes\n", info->console_size); + pr_debug("\tftrace size : %ld Bytes\n", info->ftrace_size); + + err = psz_alloc_zones(cxt); + if (err) { + pr_err("alloc zones failed\n"); + goto fail_out; + } + + if (info->kmsg_size) { + cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size - + sizeof(struct psz_kmsg_header); + cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL); + if (!cxt->pstore.buf) { + err = -ENOMEM; + goto fail_free; + } + } + cxt->pstore.data = cxt; + + pr_info("registered %s as backend for", info->name); + cxt->pstore.max_reason = info->max_reason; + cxt->pstore.name = info->name; + if (info->kmsg_size) { + cxt->pstore.flags |= PSTORE_FLAGS_DMESG; + pr_cont(" kmsg(%s", + kmsg_dump_reason_str(cxt->pstore.max_reason)); + if (cxt->pstore_zone_info->panic_write) + pr_cont(",panic_write"); + pr_cont(")"); + } + if (info->pmsg_size) { + cxt->pstore.flags |= PSTORE_FLAGS_PMSG; + pr_cont(" pmsg"); + } + if (info->console_size) { + cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE; + pr_cont(" console"); + } + if (info->ftrace_size) { + cxt->pstore.flags |= PSTORE_FLAGS_FTRACE; + pr_cont(" ftrace"); + } + pr_cont("\n"); + + err = pstore_register(&cxt->pstore); + if (err) { + pr_err("registering with pstore failed\n"); + goto fail_free; + } + mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock); + + return 0; + +fail_free: + kfree(cxt->pstore.buf); + cxt->pstore.buf = NULL; + cxt->pstore.bufsize = 0; + psz_free_all_zones(cxt); +fail_out: + pstore_zone_cxt.pstore_zone_info = NULL; + mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock); + return err; +} +EXPORT_SYMBOL_GPL(register_pstore_zone); + +/** + * unregister_pstore_zone() - unregister to pstore/zone + * + * @info: back-end driver information. See struct pstore_zone_info. + */ +void unregister_pstore_zone(struct pstore_zone_info *info) +{ + struct psz_context *cxt = &pstore_zone_cxt; + + mutex_lock(&cxt->pstore_zone_info_lock); + if (!cxt->pstore_zone_info) { + mutex_unlock(&cxt->pstore_zone_info_lock); + return; + } + + /* Stop incoming writes from pstore. */ + pstore_unregister(&cxt->pstore); + + /* Flush any pending writes. */ + psz_flush_all_dirty_zones(NULL); + flush_delayed_work(&psz_cleaner); + + /* Clean up allocations. */ + kfree(cxt->pstore.buf); + cxt->pstore.buf = NULL; + cxt->pstore.bufsize = 0; + cxt->pstore_zone_info = NULL; + + psz_free_all_zones(cxt); + + /* Clear counters and zone state. 
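register_pstore_zone() accepts a record size of zero (feature disabled) but otherwise insists on sector-sized multiples, per the check_size() macro a little earlier in this hunk. A sketch of that rule, assuming SECTOR_SIZE is 512:

    #include <stdio.h>

    #define SECTOR_SIZE 512UL

    static int check_size(unsigned long size, unsigned long align)
    {
            if (size > 0 && size < align)
                    return -1;              /* too small to hold one unit  */
            if (size & (align - 1))
                    return -1;              /* not a multiple of the unit  */
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   check_size(0, SECTOR_SIZE),        /*  0: disabled is fine */
                   check_size(4096, SECTOR_SIZE),     /*  0: aligned          */
                   check_size(1000, SECTOR_SIZE));    /* -1: misaligned       */
            return 0;
    }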
*/ + cxt->oops_counter = 0; + cxt->panic_counter = 0; + atomic_set(&cxt->recovered, 0); + atomic_set(&cxt->on_panic, 0); + + mutex_unlock(&cxt->pstore_zone_info_lock); +} +EXPORT_SYMBOL_GPL(unregister_pstore_zone); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>"); +MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); +MODULE_DESCRIPTION("Storage Manager for pstore/blk"); diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c index 345db56c98fd..755293c8c71a 100644 --- a/fs/qnx6/inode.c +++ b/fs/qnx6/inode.c @@ -99,10 +99,9 @@ static int qnx6_readpage(struct file *file, struct page *page) return mpage_readpage(page, qnx6_get_block); } -static int qnx6_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void qnx6_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); + mpage_readahead(rac, qnx6_get_block); } /* @@ -499,7 +498,7 @@ static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) } static const struct address_space_operations qnx6_aops = { .readpage = qnx6_readpage, - .readpages = qnx6_readpages, + .readahead = qnx6_readahead, .bmap = qnx6_bmap }; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index b6a4f692d345..7b4bac91146b 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2841,7 +2841,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = { EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); static int do_proc_dqstats(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { unsigned int type = (unsigned long *)table->data - dqstats.stat; s64 value = percpu_counter_sum(&dqstats.counter[type]); diff --git a/fs/readdir.c b/fs/readdir.c index de2eceffdee8..a49f07c11cfb 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -157,17 +157,18 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen, } buf->result++; dirent = buf->dirent; - if (!access_ok(dirent, + if (!user_write_access_begin(dirent, (unsigned long)(dirent->d_name + namlen + 1) - (unsigned long)dirent)) goto efault; - if ( __put_user(d_ino, &dirent->d_ino) || - __put_user(offset, &dirent->d_offset) || - __put_user(namlen, &dirent->d_namlen) || - __copy_to_user(dirent->d_name, name, namlen) || - __put_user(0, dirent->d_name + namlen)) - goto efault; + unsafe_put_user(d_ino, &dirent->d_ino, efault_end); + unsafe_put_user(offset, &dirent->d_offset, efault_end); + unsafe_put_user(namlen, &dirent->d_namlen, efault_end); + unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); + user_write_access_end(); return 0; +efault_end: + user_write_access_end(); efault: buf->result = -EFAULT; return -EFAULT; @@ -242,7 +243,7 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen, return -EINTR; dirent = buf->current_dir; prev = (void __user *) dirent - prev_reclen; - if (!user_access_begin(prev, reclen + prev_reclen)) + if (!user_write_access_begin(prev, reclen + prev_reclen)) goto efault; /* This might be 'dirent->d_off', but if so it will get overwritten */ @@ -251,14 +252,14 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen, unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); - user_access_end(); + user_write_access_end(); buf->current_dir = (void __user *)dirent + reclen; 
buf->prev_reclen = reclen; buf->count -= reclen; return 0; efault_end: - user_access_end(); + user_write_access_end(); efault: buf->error = -EFAULT; return -EFAULT; @@ -275,9 +276,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, }; int error; - if (!access_ok(dirent, count)) - return -EFAULT; - f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -327,7 +325,7 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen, return -EINTR; dirent = buf->current_dir; prev = (void __user *)dirent - prev_reclen; - if (!user_access_begin(prev, reclen + prev_reclen)) + if (!user_write_access_begin(prev, reclen + prev_reclen)) goto efault; /* This might be 'dirent->d_off', but if so it will get overwritten */ @@ -336,7 +334,7 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen, unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, &dirent->d_type, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); - user_access_end(); + user_write_access_end(); buf->prev_reclen = reclen; buf->current_dir = (void __user *)dirent + reclen; @@ -344,7 +342,7 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen, return 0; efault_end: - user_access_end(); + user_write_access_end(); efault: buf->error = -EFAULT; return -EFAULT; @@ -361,9 +359,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, }; int error; - if (!access_ok(dirent, count)) - return -EFAULT; - f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -376,7 +371,7 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, typeof(lastdirent->d_off) d_off = buf.ctx.pos; lastdirent = (void __user *) buf.current_dir - buf.prev_reclen; - if (__put_user(d_off, &lastdirent->d_off)) + if (put_user(d_off, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; @@ -424,17 +419,18 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name, } buf->result++; dirent = buf->dirent; - if (!access_ok(dirent, + if (!user_write_access_begin(dirent, (unsigned long)(dirent->d_name + namlen + 1) - (unsigned long)dirent)) goto efault; - if ( __put_user(d_ino, &dirent->d_ino) || - __put_user(offset, &dirent->d_offset) || - __put_user(namlen, &dirent->d_namlen) || - __copy_to_user(dirent->d_name, name, namlen) || - __put_user(0, dirent->d_name + namlen)) - goto efault; + unsafe_put_user(d_ino, &dirent->d_ino, efault_end); + unsafe_put_user(offset, &dirent->d_offset, efault_end); + unsafe_put_user(namlen, &dirent->d_namlen, efault_end); + unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); + user_write_access_end(); return 0; +efault_end: + user_write_access_end(); efault: buf->result = -EFAULT; return -EFAULT; @@ -471,7 +467,7 @@ struct compat_linux_dirent { struct compat_getdents_callback { struct dir_context ctx; struct compat_linux_dirent __user *current_dir; - struct compat_linux_dirent __user *previous; + int prev_reclen; int count; int error; }; @@ -479,13 +475,17 @@ struct compat_getdents_callback { static int compat_filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { - struct compat_linux_dirent __user * dirent; + struct compat_linux_dirent __user *dirent, *prev; struct compat_getdents_callback *buf = container_of(ctx, struct compat_getdents_callback, ctx); compat_ulong_t d_ino; int reclen = ALIGN(offsetof(struct compat_linux_dirent, d_name) + namlen + 2, sizeof(compat_long_t)); + int prev_reclen; + buf->error = 
verify_dirent_name(name, namlen); + if (unlikely(buf->error)) + return buf->error; buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; @@ -494,29 +494,27 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen, buf->error = -EOVERFLOW; return -EOVERFLOW; } - dirent = buf->previous; - if (dirent) { - if (signal_pending(current)) - return -EINTR; - if (__put_user(offset, &dirent->d_off)) - goto efault; - } + prev_reclen = buf->prev_reclen; + if (prev_reclen && signal_pending(current)) + return -EINTR; dirent = buf->current_dir; - if (__put_user(d_ino, &dirent->d_ino)) - goto efault; - if (__put_user(reclen, &dirent->d_reclen)) - goto efault; - if (copy_to_user(dirent->d_name, name, namlen)) - goto efault; - if (__put_user(0, dirent->d_name + namlen)) - goto efault; - if (__put_user(d_type, (char __user *) dirent + reclen - 1)) + prev = (void __user *) dirent - prev_reclen; + if (!user_write_access_begin(prev, reclen + prev_reclen)) goto efault; - buf->previous = dirent; - dirent = (void __user *)dirent + reclen; - buf->current_dir = dirent; + + unsafe_put_user(offset, &prev->d_off, efault_end); + unsafe_put_user(d_ino, &dirent->d_ino, efault_end); + unsafe_put_user(reclen, &dirent->d_reclen, efault_end); + unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end); + unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); + user_write_access_end(); + + buf->prev_reclen = reclen; + buf->current_dir = (void __user *)dirent + reclen; buf->count -= reclen; return 0; +efault_end: + user_write_access_end(); efault: buf->error = -EFAULT; return -EFAULT; @@ -526,7 +524,6 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, struct compat_linux_dirent __user *, dirent, unsigned int, count) { struct fd f; - struct compat_linux_dirent __user * lastdirent; struct compat_getdents_callback buf = { .ctx.actor = compat_filldir, .current_dir = dirent, @@ -534,9 +531,6 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, }; int error; - if (!access_ok(dirent, count)) - return -EFAULT; - f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -544,8 +538,10 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, error = iterate_dir(f.file, &buf.ctx); if (error >= 0) error = buf.error; - lastdirent = buf.previous; - if (lastdirent) { + if (buf.prev_reclen) { + struct compat_linux_dirent __user * lastdirent; + lastdirent = (void __user *)buf.current_dir - buf.prev_reclen; + if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 84cf8bdbec9c..0b641ae694f1 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -159,7 +159,7 @@ static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end, barrier_done = reiserfs_commit_for_inode(inode); reiserfs_write_unlock(inode->i_sb); if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb)) - blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); inode_unlock(inode); if (barrier_done < 0) return barrier_done; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 6419e6dacc39..1509775da040 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1066,7 +1066,7 @@ research: } else { /* paste hole to the indirect item */ /* - * If kmalloc failed, max_to_insert becomes + * If kcalloc failed, max_to_insert becomes * zero and it means we only have space for * one block */ @@ -1160,11 +1160,9 @@ failure: return retval; } -static int 
-reiserfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void reiserfs_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block); + mpage_readahead(rac, reiserfs_get_block); } /* @@ -3434,7 +3432,7 @@ out: const struct address_space_operations reiserfs_address_space_operations = { .writepage = reiserfs_writepage, .readpage = reiserfs_readpage, - .readpages = reiserfs_readpages, + .readahead = reiserfs_readahead, .releasepage = reiserfs_releasepage, .invalidatepage = reiserfs_invalidatepage, .write_begin = reiserfs_write_begin, diff --git a/fs/romfs/Kconfig b/fs/romfs/Kconfig index ad4c45788896..9737b8e68878 100644 --- a/fs/romfs/Kconfig +++ b/fs/romfs/Kconfig @@ -6,7 +6,7 @@ config ROMFS_FS This is a very small read-only file system mainly intended for initial ram disks of installation disks, but it could be used for other read-only media as well. Read - <file:Documentation/filesystems/romfs.txt> for details. + <file:Documentation/filesystems/romfs.rst> for details. To compile this file system support as a module, choose M here: the module will be called romfs. Note that the file system of your diff --git a/fs/seq_file.c b/fs/seq_file.c index 70f5fdf99bf6..4e6239f33c06 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -6,6 +6,8 @@ * initial implementation -- AV, Oct 2001. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/cache.h> #include <linux/fs.h> #include <linux/export.h> @@ -233,9 +235,8 @@ Fill: p = m->op->next(m, p, &m->index); if (pos == m->index) { - pr_info_ratelimited("buggy seq_file .next function %ps " - "did not updated position index\n", - m->op->next); + pr_info_ratelimited("buggy .next function %ps did not update position index\n", + m->op->next); m->index++; } if (!p || IS_ERR(p)) { diff --git a/fs/splice.c b/fs/splice.c index 4e53efbd621d..6b3c9a018a8e 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -44,8 +44,8 @@ * addition of remove_mapping(). If success is returned, the caller may * attempt to reuse this page for another destination. 
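Looking back at the filldir()/compat_filldir() hunks in fs/readdir.c above: instead of carrying a pointer to the previous dirent, only its record length is kept, and the previous record is recovered as current_dir minus prev_reclen whenever its d_off needs to be patched. A userspace sketch of that bookkeeping, with a simplified record layout and invented names:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct rec {                    /* simplified linux_dirent-style record */
            uint64_t d_ino;
            int64_t  d_off;
            uint16_t d_reclen;
            char     d_name[];
    };

    static size_t emit(char *buf, size_t pos, size_t prev_reclen,
                       uint64_t ino, int64_t prev_off, const char *name)
    {
            size_t namlen = strlen(name);
            size_t reclen = (offsetof(struct rec, d_name) + namlen + 1 + 7) &
                            ~(size_t)7;
            struct rec *cur = (struct rec *)(buf + pos);

            if (prev_reclen) {      /* back-patch d_off of the previous record */
                    struct rec *prev = (struct rec *)(buf + pos - prev_reclen);

                    prev->d_off = prev_off;
            }
            cur->d_ino = ino;
            cur->d_reclen = (uint16_t)reclen;
            memcpy(cur->d_name, name, namlen + 1);
            return reclen;
    }

    int main(void)
    {
            _Alignas(8) char buf[256];
            size_t r1 = emit(buf, 0, 0, 10, 0, "first");
            size_t r2 = emit(buf, r1, r1, 11, 1, "second");

            printf("reclen %zu then %zu\n", r1, r2);
            return 0;
    }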
*/ -static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { struct page *page = buf->page; struct address_space *mapping; @@ -76,7 +76,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, */ if (remove_mapping(mapping, page)) { buf->flags |= PIPE_BUF_FLAG_LRU; - return 0; + return true; } } @@ -86,7 +86,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, */ out_unlock: unlock_page(page); - return 1; + return false; } static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, @@ -139,27 +139,26 @@ error: } const struct pipe_buf_operations page_cache_pipe_buf_ops = { - .confirm = page_cache_pipe_buf_confirm, - .release = page_cache_pipe_buf_release, - .steal = page_cache_pipe_buf_steal, - .get = generic_pipe_buf_get, + .confirm = page_cache_pipe_buf_confirm, + .release = page_cache_pipe_buf_release, + .try_steal = page_cache_pipe_buf_try_steal, + .get = generic_pipe_buf_get, }; -static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +static bool user_page_pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) - return 1; + return false; buf->flags |= PIPE_BUF_FLAG_LRU; - return generic_pipe_buf_steal(pipe, buf); + return generic_pipe_buf_try_steal(pipe, buf); } static const struct pipe_buf_operations user_page_pipe_buf_ops = { - .confirm = generic_pipe_buf_confirm, - .release = page_cache_pipe_buf_release, - .steal = user_page_pipe_buf_steal, - .get = generic_pipe_buf_get, + .release = page_cache_pipe_buf_release, + .try_steal = user_page_pipe_buf_try_steal, + .get = generic_pipe_buf_get, }; static void wakeup_pipe_readers(struct pipe_inode_info *pipe) @@ -331,24 +330,15 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, EXPORT_SYMBOL(generic_file_splice_read); const struct pipe_buf_operations default_pipe_buf_ops = { - .confirm = generic_pipe_buf_confirm, - .release = generic_pipe_buf_release, - .steal = generic_pipe_buf_steal, - .get = generic_pipe_buf_get, + .release = generic_pipe_buf_release, + .try_steal = generic_pipe_buf_try_steal, + .get = generic_pipe_buf_get, }; -int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) -{ - return 1; -} - /* Pipe buffer operations for a socket and similar. 
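In the do_splice_from()/do_splice_to() hunks above, the intermediate function pointer is dropped in favour of calling the file_operations hook directly, falling back to the default helper when the hook is absent (the surrounding context lines are hard to follow in this flattened rendering). A tiny userspace sketch of that dispatch shape, with invented types:

    #include <stdio.h>

    struct file_ops {
            long (*splice_write)(const char *what);
    };

    static long default_splice_write(const char *what)
    {
            printf("default path: %s\n", what);
            return 0;
    }

    static long do_splice_from(const struct file_ops *ops, const char *what)
    {
            if (ops->splice_write)
                    return ops->splice_write(what);
            return default_splice_write(what);
    }

    int main(void)
    {
            struct file_ops plain = { .splice_write = NULL };

            do_splice_from(&plain, "no splice_write hook");
            return 0;
    }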
*/ const struct pipe_buf_operations nosteal_pipe_buf_ops = { - .confirm = generic_pipe_buf_confirm, - .release = generic_pipe_buf_release, - .steal = generic_pipe_buf_nosteal, - .get = generic_pipe_buf_get, + .release = generic_pipe_buf_release, + .get = generic_pipe_buf_get, }; EXPORT_SYMBOL(nosteal_pipe_buf_ops); @@ -852,15 +842,9 @@ EXPORT_SYMBOL(generic_splice_sendpage); static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, - loff_t *, size_t, unsigned int); - if (out->f_op->splice_write) - splice_write = out->f_op->splice_write; - else - splice_write = default_file_splice_write; - - return splice_write(pipe, out, ppos, len, flags); + return out->f_op->splice_write(pipe, out, ppos, len, flags); + return default_file_splice_write(pipe, out, ppos, len, flags); } /* @@ -870,8 +854,6 @@ static long do_splice_to(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { - ssize_t (*splice_read)(struct file *, loff_t *, - struct pipe_inode_info *, size_t, unsigned int); int ret; if (unlikely(!(in->f_mode & FMODE_READ))) @@ -885,11 +867,8 @@ static long do_splice_to(struct file *in, loff_t *ppos, len = MAX_RW_COUNT; if (in->f_op->splice_read) - splice_read = in->f_op->splice_read; - else - splice_read = default_file_splice_read; - - return splice_read(in, ppos, pipe, len, flags); + return in->f_op->splice_read(in, ppos, pipe, len, flags); + return default_file_splice_read(in, ppos, pipe, len, flags); } /** @@ -1626,12 +1605,11 @@ retry: *obuf = *ibuf; /* - * Don't inherit the gift flag, we need to + * Don't inherit the gift and merge flags, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; - - pipe_buf_mark_unmergeable(obuf); + obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE; obuf->len = len; ibuf->offset += len; @@ -1719,12 +1697,11 @@ static int link_pipe(struct pipe_inode_info *ipipe, *obuf = *ibuf; /* - * Don't inherit the gift flag, we need to - * prevent multiple steals of this page. + * Don't inherit the gift and merge flag, we need to prevent + * multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; - - pipe_buf_mark_unmergeable(obuf); + obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE; if (obuf->len > len) obuf->len = len; @@ -1754,8 +1731,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. */ -static long do_tee(struct file *in, struct file *out, size_t len, - unsigned int flags) +long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *opipe = get_pipe_info(out); diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 4f9b9fb59362..64f61330564a 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -13,6 +13,7 @@ * datablocks and metadata blocks. */ +#include <linux/blkdev.h> #include <linux/fs.h> #include <linux/vfs.h> #include <linux/slab.h> @@ -27,44 +28,103 @@ #include "page_actor.h" /* - * Read the metadata block length, this is stored in the first two - * bytes of the metadata block. + * Returns the amount of bytes copied to the page actor. 
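copy_bio_to_actor(), described above, walks the bio's segments with a byte offset that applies only to the first segment, copying until the requested length is satisfied. A standalone model of that walk follows; a plain array of segments stands in for the bio, and the destination here is one flat buffer rather than page-sized actor pages:

    #include <stdio.h>
    #include <string.h>

    struct seg { const char *p; int len; };

    static int copy_segs(const struct seg *segs, int nsegs, int offset,
                         int req, char *dst)
    {
            int copied = 0, i;

            for (i = 0; i < nsegs && copied < req; i++) {
                    int n = segs[i].len - offset;

                    if (n > req - copied)
                            n = req - copied;
                    memcpy(dst + copied, segs[i].p + offset, n);
                    copied += n;
                    offset = 0;   /* only the first segment is entered mid-way */
            }
            return copied;
    }

    int main(void)
    {
            struct seg segs[] = { { "xxhello ", 8 }, { "squashfs", 8 } };
            char dst[32] = { 0 };
            int n = copy_segs(segs, 2, 2, 14, dst);

            printf("%d bytes: %s\n", n, dst);    /* 14 bytes: hello squashfs */
            return 0;
    }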
*/ -static struct buffer_head *get_block_length(struct super_block *sb, - u64 *cur_index, int *offset, int *length) +static int copy_bio_to_actor(struct bio *bio, + struct squashfs_page_actor *actor, + int offset, int req_length) +{ + void *actor_addr = squashfs_first_page(actor); + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); + int copied_bytes = 0; + int actor_offset = 0; + + if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) + return 0; + + while (copied_bytes < req_length) { + int bytes_to_copy = min_t(int, bvec->bv_len - offset, + PAGE_SIZE - actor_offset); + + bytes_to_copy = min_t(int, bytes_to_copy, + req_length - copied_bytes); + memcpy(actor_addr + actor_offset, + page_address(bvec->bv_page) + bvec->bv_offset + offset, + bytes_to_copy); + + actor_offset += bytes_to_copy; + copied_bytes += bytes_to_copy; + offset += bytes_to_copy; + + if (actor_offset >= PAGE_SIZE) { + actor_addr = squashfs_next_page(actor); + if (!actor_addr) + break; + actor_offset = 0; + } + if (offset >= bvec->bv_len) { + if (!bio_next_segment(bio, &iter_all)) + break; + offset = 0; + } + } + squashfs_finish_page(actor); + return copied_bytes; +} + +static int squashfs_bio_read(struct super_block *sb, u64 index, int length, + struct bio **biop, int *block_offset) { struct squashfs_sb_info *msblk = sb->s_fs_info; - struct buffer_head *bh; - - bh = sb_bread(sb, *cur_index); - if (bh == NULL) - return NULL; - - if (msblk->devblksize - *offset == 1) { - *length = (unsigned char) bh->b_data[*offset]; - put_bh(bh); - bh = sb_bread(sb, ++(*cur_index)); - if (bh == NULL) - return NULL; - *length |= (unsigned char) bh->b_data[0] << 8; - *offset = 1; - } else { - *length = (unsigned char) bh->b_data[*offset] | - (unsigned char) bh->b_data[*offset + 1] << 8; - *offset += 2; - - if (*offset == msblk->devblksize) { - put_bh(bh); - bh = sb_bread(sb, ++(*cur_index)); - if (bh == NULL) - return NULL; - *offset = 0; + const u64 read_start = round_down(index, msblk->devblksize); + const sector_t block = read_start >> msblk->devblksize_log2; + const u64 read_end = round_up(index + length, msblk->devblksize); + const sector_t block_end = read_end >> msblk->devblksize_log2; + int offset = read_start - round_down(index, PAGE_SIZE); + int total_len = (block_end - block) << msblk->devblksize_log2; + const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE); + int error, i; + struct bio *bio; + + bio = bio_alloc(GFP_NOIO, page_count); + if (!bio) + return -ENOMEM; + + bio_set_dev(bio, sb->s_bdev); + bio->bi_opf = READ; + bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT); + + for (i = 0; i < page_count; ++i) { + unsigned int len = + min_t(unsigned int, PAGE_SIZE - offset, total_len); + struct page *page = alloc_page(GFP_NOIO); + + if (!page) { + error = -ENOMEM; + goto out_free_bio; + } + if (!bio_add_page(bio, page, len, offset)) { + error = -EIO; + goto out_free_bio; } + offset = 0; + total_len -= len; } - return bh; -} + error = submit_bio_wait(bio); + if (error) + goto out_free_bio; + *biop = bio; + *block_offset = index & ((1 << msblk->devblksize_log2) - 1); + return 0; + +out_free_bio: + bio_free_pages(bio); + bio_put(bio); + return error; +} /* * Read and decompress a metadata block or datablock. Length is non-zero @@ -76,129 +136,88 @@ static struct buffer_head *get_block_length(struct super_block *sb, * algorithms). 
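squashfs_bio_read() above widens the requested byte range to whole device blocks, allocates enough pages to hold it, and reports where in the first block the caller's data actually starts. The arithmetic, with made-up example numbers:

    #include <stdio.h>

    #define PAGE_SIZE        4096ULL
    #define round_down(x, a) ((x) / (a) * (a))
    #define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            unsigned long long devblksize = 1024, index = 71000, length = 3000;
            unsigned long long read_start = round_down(index, devblksize);
            unsigned long long read_end   = round_up(index + length, devblksize);
            unsigned long long offset     = read_start - round_down(index, PAGE_SIZE);
            unsigned long long total      = read_end - read_start;
            unsigned long long pages      = (total + offset + PAGE_SIZE - 1) / PAGE_SIZE;
            unsigned long long block_off  = index & (devblksize - 1);

            printf("read [%llu, %llu): %llu page(s); first block %llu bytes into "
                   "page 0; data %llu bytes into that block\n",
                   read_start, read_end, pages, offset, block_off);
            return 0;
    }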
*/ int squashfs_read_data(struct super_block *sb, u64 index, int length, - u64 *next_index, struct squashfs_page_actor *output) + u64 *next_index, struct squashfs_page_actor *output) { struct squashfs_sb_info *msblk = sb->s_fs_info; - struct buffer_head **bh; - int offset = index & ((1 << msblk->devblksize_log2) - 1); - u64 cur_index = index >> msblk->devblksize_log2; - int bytes, compressed, b = 0, k = 0, avail, i; - - bh = kcalloc(((output->length + msblk->devblksize - 1) - >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL); - if (bh == NULL) - return -ENOMEM; + struct bio *bio = NULL; + int compressed; + int res; + int offset; if (length) { /* * Datablock. */ - bytes = -offset; compressed = SQUASHFS_COMPRESSED_BLOCK(length); length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length); - if (next_index) - *next_index = index + length; - TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed ? "" : "un", length, output->length); - - if (length < 0 || length > output->length || - (index + length) > msblk->bytes_used) - goto read_failure; - - for (b = 0; bytes < length; b++, cur_index++) { - bh[b] = sb_getblk(sb, cur_index); - if (bh[b] == NULL) - goto block_release; - bytes += msblk->devblksize; - } - ll_rw_block(REQ_OP_READ, 0, b, bh); } else { /* * Metadata block. */ - if ((index + 2) > msblk->bytes_used) - goto read_failure; + const u8 *data; + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); - bh[0] = get_block_length(sb, &cur_index, &offset, &length); - if (bh[0] == NULL) - goto read_failure; - b = 1; + if (index + 2 > msblk->bytes_used) { + res = -EIO; + goto out; + } + res = squashfs_bio_read(sb, index, 2, &bio, &offset); + if (res) + goto out; + + if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) { + res = -EIO; + goto out_free_bio; + } + /* Extract the length of the metadata block */ + data = page_address(bvec->bv_page) + bvec->bv_offset; + length = data[offset]; + if (offset <= bvec->bv_len - 1) { + length |= data[offset + 1] << 8; + } else { + if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) { + res = -EIO; + goto out_free_bio; + } + data = page_address(bvec->bv_page) + bvec->bv_offset; + length |= data[0] << 8; + } + bio_free_pages(bio); + bio_put(bio); - bytes = msblk->devblksize - offset; compressed = SQUASHFS_COMPRESSED(length); length = SQUASHFS_COMPRESSED_SIZE(length); - if (next_index) - *next_index = index + length + 2; + index += 2; TRACE("Block @ 0x%llx, %scompressed size %d\n", index, - compressed ? "" : "un", length); - - if (length < 0 || length > output->length || - (index + length) > msblk->bytes_used) - goto block_release; - - for (; bytes < length; b++) { - bh[b] = sb_getblk(sb, ++cur_index); - if (bh[b] == NULL) - goto block_release; - bytes += msblk->devblksize; - } - ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1); + compressed ? "" : "un", length); } + if (next_index) + *next_index = index + length; - for (i = 0; i < b; i++) { - wait_on_buffer(bh[i]); - if (!buffer_uptodate(bh[i])) - goto block_release; - } + res = squashfs_bio_read(sb, index, length, &bio, &offset); + if (res) + goto out; if (compressed) { - if (!msblk->stream) - goto read_failure; - length = squashfs_decompress(msblk, bh, b, offset, length, - output); - if (length < 0) - goto read_failure; - } else { - /* - * Block is uncompressed. 
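The reworked squashfs_read_data() above has to pull the 2-byte little-endian metadata length out of the bio, and the high byte may live in the next segment. A self-contained sketch of that extraction, with a segment array standing in for the bio and invented names:

    #include <stdint.h>
    #include <stdio.h>

    struct seg { const uint8_t *p; unsigned int len; };

    static int read_len16(const struct seg *segs, int nsegs, unsigned int offset)
    {
            int length = segs[0].p[offset];

            if (offset + 1 < segs[0].len)
                    length |= segs[0].p[offset + 1] << 8;   /* same segment */
            else if (nsegs > 1)
                    length |= segs[1].p[0] << 8;            /* next segment */
            else
                    return -1;
            return length;
    }

    int main(void)
    {
            uint8_t a[] = { 0xaa, 0xbb, 0x34 };   /* low byte ends segment 1   */
            uint8_t b[] = { 0x12 };               /* high byte opens segment 2 */
            struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

            printf("length = 0x%04x\n", read_len16(segs, 2, 2));   /* 0x1234 */
            return 0;
    }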
- */ - int in, pg_offset = 0; - void *data = squashfs_first_page(output); - - for (bytes = length; k < b; k++) { - in = min(bytes, msblk->devblksize - offset); - bytes -= in; - while (in) { - if (pg_offset == PAGE_SIZE) { - data = squashfs_next_page(output); - pg_offset = 0; - } - avail = min_t(int, in, PAGE_SIZE - - pg_offset); - memcpy(data + pg_offset, bh[k]->b_data + offset, - avail); - in -= avail; - pg_offset += avail; - offset += avail; - } - offset = 0; - put_bh(bh[k]); + if (!msblk->stream) { + res = -EIO; + goto out_free_bio; } - squashfs_finish_page(output); + res = squashfs_decompress(msblk, bio, offset, length, output); + } else { + res = copy_bio_to_actor(bio, output, offset, length); } - kfree(bh); - return length; - -block_release: - for (; k < b; k++) - put_bh(bh[k]); +out_free_bio: + bio_free_pages(bio); + bio_put(bio); +out: + if (res < 0) + ERROR("Failed to read block 0x%llx: %d\n", index, res); -read_failure: - ERROR("squashfs_read_data failed to read block 0x%llx\n", - (unsigned long long) index); - kfree(bh); - return -EIO; + return res; } diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h index ec8617523e56..1b9ccfd0aa51 100644 --- a/fs/squashfs/decompressor.h +++ b/fs/squashfs/decompressor.h @@ -10,13 +10,14 @@ * decompressor.h */ +#include <linux/bio.h> + struct squashfs_decompressor { void *(*init)(struct squashfs_sb_info *, void *); void *(*comp_opts)(struct squashfs_sb_info *, void *, int); void (*free)(void *); int (*decompress)(struct squashfs_sb_info *, void *, - struct buffer_head **, int, int, int, - struct squashfs_page_actor *); + struct bio *, int, int, struct squashfs_page_actor *); int id; char *name; int supported; diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c index c181dee235bb..db9f12a3ea05 100644 --- a/fs/squashfs/decompressor_multi.c +++ b/fs/squashfs/decompressor_multi.c @@ -6,7 +6,7 @@ #include <linux/types.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/buffer_head.h> +#include <linux/bio.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/cpumask.h> @@ -180,14 +180,15 @@ wait: } -int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, - int b, int offset, int length, struct squashfs_page_actor *output) +int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio, + int offset, int length, + struct squashfs_page_actor *output) { int res; struct squashfs_stream *stream = msblk->stream; struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream); res = msblk->decompressor->decompress(msblk, decomp_stream->stream, - bh, b, offset, length, output); + bio, offset, length, output); put_decomp_stream(decomp_stream, stream); if (res < 0) ERROR("%s decompression failed, data probably corrupt\n", diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c index 2a2a2d106440..b881b9283b7f 100644 --- a/fs/squashfs/decompressor_multi_percpu.c +++ b/fs/squashfs/decompressor_multi_percpu.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/percpu.h> #include <linux/buffer_head.h> +#include <linux/local_lock.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" @@ -20,7 +21,8 @@ */ struct squashfs_stream { - void *stream; + void *stream; + local_lock_t lock; }; void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, @@ -41,6 +43,7 @@ void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, err = PTR_ERR(stream->stream); goto out; } + 
local_lock_init(&stream->lock); } kfree(comp_opts); @@ -72,15 +75,19 @@ void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk) } } -int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, - int b, int offset, int length, struct squashfs_page_actor *output) +int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio, + int offset, int length, struct squashfs_page_actor *output) { - struct squashfs_stream __percpu *percpu = - (struct squashfs_stream __percpu *) msblk->stream; - struct squashfs_stream *stream = get_cpu_ptr(percpu); - int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, - offset, length, output); - put_cpu_ptr(stream); + struct squashfs_stream *stream; + int res; + + local_lock(&msblk->stream->lock); + stream = this_cpu_ptr(msblk->stream); + + res = msblk->decompressor->decompress(msblk, stream->stream, bio, + offset, length, output); + + local_unlock(&msblk->stream->lock); if (res < 0) ERROR("%s decompression failed, data probably corrupt\n", diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c index 550c3e592032..4eb3d083d45e 100644 --- a/fs/squashfs/decompressor_single.c +++ b/fs/squashfs/decompressor_single.c @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/buffer_head.h> +#include <linux/bio.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" @@ -59,14 +59,15 @@ void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk) } } -int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, - int b, int offset, int length, struct squashfs_page_actor *output) +int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio, + int offset, int length, + struct squashfs_page_actor *output) { int res; struct squashfs_stream *stream = msblk->stream; mutex_lock(&stream->mutex); - res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, + res = msblk->decompressor->decompress(msblk, stream->stream, bio, offset, length, output); mutex_unlock(&stream->mutex); diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c index c4e47e0588c7..233d5582fbee 100644 --- a/fs/squashfs/lz4_wrapper.c +++ b/fs/squashfs/lz4_wrapper.c @@ -4,7 +4,7 @@ * Phillip Lougher <phillip@squashfs.org.uk> */ -#include <linux/buffer_head.h> +#include <linux/bio.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -89,20 +89,23 @@ static void lz4_free(void *strm) static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm, - struct buffer_head **bh, int b, int offset, int length, + struct bio *bio, int offset, int length, struct squashfs_page_actor *output) { + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); struct squashfs_lz4 *stream = strm; void *buff = stream->input, *data; - int avail, i, bytes = length, res; + int bytes = length, res; - for (i = 0; i < b; i++) { - avail = min(bytes, msblk->devblksize - offset); - memcpy(buff, bh[i]->b_data + offset, avail); + while (bio_next_segment(bio, &iter_all)) { + int avail = min(bytes, ((int)bvec->bv_len) - offset); + + data = page_address(bvec->bv_page) + bvec->bv_offset; + memcpy(buff, data + offset, avail); buff += avail; bytes -= avail; offset = 0; - put_bh(bh[i]); } res = LZ4_decompress_safe(stream->input, stream->output, diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c index aa3c3dafc33d..97bb7d92ddcd 100644 --- a/fs/squashfs/lzo_wrapper.c +++ 
b/fs/squashfs/lzo_wrapper.c @@ -9,7 +9,7 @@ */ #include <linux/mutex.h> -#include <linux/buffer_head.h> +#include <linux/bio.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/lzo.h> @@ -63,21 +63,24 @@ static void lzo_free(void *strm) static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm, - struct buffer_head **bh, int b, int offset, int length, + struct bio *bio, int offset, int length, struct squashfs_page_actor *output) { + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); struct squashfs_lzo *stream = strm; void *buff = stream->input, *data; - int avail, i, bytes = length, res; + int bytes = length, res; size_t out_len = output->length; - for (i = 0; i < b; i++) { - avail = min(bytes, msblk->devblksize - offset); - memcpy(buff, bh[i]->b_data + offset, avail); + while (bio_next_segment(bio, &iter_all)) { + int avail = min(bytes, ((int)bvec->bv_len) - offset); + + data = page_address(bvec->bv_page) + bvec->bv_offset; + memcpy(buff, data + offset, avail); buff += avail; bytes -= avail; offset = 0; - put_bh(bh[i]); } res = lzo1x_decompress_safe(stream->input, (size_t)length, diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 2797763ed046..9783e01c8100 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -40,8 +40,8 @@ extern void *squashfs_decompressor_setup(struct super_block *, unsigned short); /* decompressor_xxx.c */ extern void *squashfs_decompressor_create(struct squashfs_sb_info *, void *); extern void squashfs_decompressor_destroy(struct squashfs_sb_info *); -extern int squashfs_decompress(struct squashfs_sb_info *, struct buffer_head **, - int, int, int, struct squashfs_page_actor *); +extern int squashfs_decompress(struct squashfs_sb_info *, struct bio *, + int, int, struct squashfs_page_actor *); extern int squashfs_max_decompressors(void); /* export.c */ diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c index 4b2f2051a6dc..e80419aed862 100644 --- a/fs/squashfs/xz_wrapper.c +++ b/fs/squashfs/xz_wrapper.c @@ -10,7 +10,7 @@ #include <linux/mutex.h> -#include <linux/buffer_head.h> +#include <linux/bio.h> #include <linux/slab.h> #include <linux/xz.h> #include <linux/bitops.h> @@ -117,11 +117,12 @@ static void squashfs_xz_free(void *strm) static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, - struct buffer_head **bh, int b, int offset, int length, + struct bio *bio, int offset, int length, struct squashfs_page_actor *output) { - enum xz_ret xz_err; - int avail, total = 0, k = 0; + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); + int total = 0, error = 0; struct squashfs_xz *stream = strm; xz_dec_reset(stream->state); @@ -131,11 +132,23 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, stream->buf.out_size = PAGE_SIZE; stream->buf.out = squashfs_first_page(output); - do { - if (stream->buf.in_pos == stream->buf.in_size && k < b) { - avail = min(length, msblk->devblksize - offset); + for (;;) { + enum xz_ret xz_err; + + if (stream->buf.in_pos == stream->buf.in_size) { + const void *data; + int avail; + + if (!bio_next_segment(bio, &iter_all)) { + /* XZ_STREAM_END must be reached. 
*/ + error = -EIO; + break; + } + + avail = min(length, ((int)bvec->bv_len) - offset); + data = page_address(bvec->bv_page) + bvec->bv_offset; length -= avail; - stream->buf.in = bh[k]->b_data + offset; + stream->buf.in = data + offset; stream->buf.in_size = avail; stream->buf.in_pos = 0; offset = 0; @@ -150,23 +163,17 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, } xz_err = xz_dec_run(stream->state, &stream->buf); - - if (stream->buf.in_pos == stream->buf.in_size && k < b) - put_bh(bh[k++]); - } while (xz_err == XZ_OK); + if (xz_err == XZ_STREAM_END) + break; + if (xz_err != XZ_OK) { + error = -EIO; + break; + } + } squashfs_finish_page(output); - if (xz_err != XZ_STREAM_END || k < b) - goto out; - - return total + stream->buf.out_pos; - -out: - for (; k < b; k++) - put_bh(bh[k]); - - return -EIO; + return error ? error : total + stream->buf.out_pos; } const struct squashfs_decompressor squashfs_xz_comp_ops = { diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index f2226afa1625..bcb881ec47f2 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -10,7 +10,7 @@ #include <linux/mutex.h> -#include <linux/buffer_head.h> +#include <linux/bio.h> #include <linux/slab.h> #include <linux/zlib.h> #include <linux/vmalloc.h> @@ -50,21 +50,35 @@ static void zlib_free(void *strm) static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, - struct buffer_head **bh, int b, int offset, int length, + struct bio *bio, int offset, int length, struct squashfs_page_actor *output) { - int zlib_err, zlib_init = 0, k = 0; + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); + int zlib_init = 0, error = 0; z_stream *stream = strm; stream->avail_out = PAGE_SIZE; stream->next_out = squashfs_first_page(output); stream->avail_in = 0; - do { - if (stream->avail_in == 0 && k < b) { - int avail = min(length, msblk->devblksize - offset); + for (;;) { + int zlib_err; + + if (stream->avail_in == 0) { + const void *data; + int avail; + + if (!bio_next_segment(bio, &iter_all)) { + /* Z_STREAM_END must be reached. */ + error = -EIO; + break; + } + + avail = min(length, ((int)bvec->bv_len) - offset); + data = page_address(bvec->bv_page) + bvec->bv_offset; length -= avail; - stream->next_in = bh[k]->b_data + offset; + stream->next_in = data + offset; stream->avail_in = avail; offset = 0; } @@ -78,37 +92,28 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, if (!zlib_init) { zlib_err = zlib_inflateInit(stream); if (zlib_err != Z_OK) { - squashfs_finish_page(output); - goto out; + error = -EIO; + break; } zlib_init = 1; } zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH); - - if (stream->avail_in == 0 && k < b) - put_bh(bh[k++]); - } while (zlib_err == Z_OK); + if (zlib_err == Z_STREAM_END) + break; + if (zlib_err != Z_OK) { + error = -EIO; + break; + } + } squashfs_finish_page(output); - if (zlib_err != Z_STREAM_END) - goto out; - - zlib_err = zlib_inflateEnd(stream); - if (zlib_err != Z_OK) - goto out; - - if (k < b) - goto out; - - return stream->total_out; - -out: - for (; k < b; k++) - put_bh(bh[k]); + if (!error) + if (zlib_inflateEnd(stream) != Z_OK) + error = -EIO; - return -EIO; + return error ? 
error : stream->total_out; } const struct squashfs_decompressor squashfs_zlib_comp_ops = { diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c index b448c2a1d0ed..b7cb1faa652d 100644 --- a/fs/squashfs/zstd_wrapper.c +++ b/fs/squashfs/zstd_wrapper.c @@ -9,7 +9,7 @@ */ #include <linux/mutex.h> -#include <linux/buffer_head.h> +#include <linux/bio.h> #include <linux/slab.h> #include <linux/zstd.h> #include <linux/vmalloc.h> @@ -59,33 +59,44 @@ static void zstd_free(void *strm) static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, - struct buffer_head **bh, int b, int offset, int length, + struct bio *bio, int offset, int length, struct squashfs_page_actor *output) { struct workspace *wksp = strm; ZSTD_DStream *stream; size_t total_out = 0; - size_t zstd_err; - int k = 0; + int error = 0; ZSTD_inBuffer in_buf = { NULL, 0, 0 }; ZSTD_outBuffer out_buf = { NULL, 0, 0 }; + struct bvec_iter_all iter_all = {}; + struct bio_vec *bvec = bvec_init_iter_all(&iter_all); stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size); if (!stream) { ERROR("Failed to initialize zstd decompressor\n"); - goto out; + return -EIO; } out_buf.size = PAGE_SIZE; out_buf.dst = squashfs_first_page(output); - do { - if (in_buf.pos == in_buf.size && k < b) { - int avail = min(length, msblk->devblksize - offset); + for (;;) { + size_t zstd_err; + if (in_buf.pos == in_buf.size) { + const void *data; + int avail; + + if (!bio_next_segment(bio, &iter_all)) { + error = -EIO; + break; + } + + avail = min(length, ((int)bvec->bv_len) - offset); + data = page_address(bvec->bv_page) + bvec->bv_offset; length -= avail; - in_buf.src = bh[k]->b_data + offset; + in_buf.src = data + offset; in_buf.size = avail; in_buf.pos = 0; offset = 0; @@ -97,8 +108,8 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, /* Shouldn't run out of pages * before stream is done. */ - squashfs_finish_page(output); - goto out; + error = -EIO; + break; } out_buf.pos = 0; out_buf.size = PAGE_SIZE; @@ -107,29 +118,20 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, total_out -= out_buf.pos; zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf); total_out += out_buf.pos; /* add the additional data produced */ - - if (in_buf.pos == in_buf.size && k < b) - put_bh(bh[k++]); - } while (zstd_err != 0 && !ZSTD_isError(zstd_err)); - - squashfs_finish_page(output); - - if (ZSTD_isError(zstd_err)) { - ERROR("zstd decompression error: %d\n", - (int)ZSTD_getErrorCode(zstd_err)); - goto out; + if (zstd_err == 0) + break; + + if (ZSTD_isError(zstd_err)) { + ERROR("zstd decompression error: %d\n", + (int)ZSTD_getErrorCode(zstd_err)); + error = -EIO; + break; + } } - if (k < b) - goto out; - - return (int)total_out; - -out: - for (; k < b; k++) - put_bh(bh[k]); + squashfs_finish_page(output); - return -EIO; + return error ? 
error : total_out; } const struct squashfs_decompressor squashfs_zstd_comp_ops = { diff --git a/fs/stat.c b/fs/stat.c index 030008796479..44f8ad346db4 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -22,6 +22,7 @@ #include <asm/unistd.h> #include "internal.h" +#include "mount.h" /** * generic_fillattr - Fill in the basic attributes from the inode struct @@ -70,15 +71,18 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat, memset(stat, 0, sizeof(*stat)); stat->result_mask |= STATX_BASIC_STATS; - request_mask &= STATX_ALL; query_flags &= KSTAT_QUERY_FLAGS; /* allow the fs to override these if it really wants to */ - if (IS_NOATIME(inode)) + /* SB_NOATIME means filesystem supplies dummy atime value */ + if (inode->i_sb->s_flags & SB_NOATIME) stat->result_mask &= ~STATX_ATIME; if (IS_AUTOMOUNT(inode)) stat->attributes |= STATX_ATTR_AUTOMOUNT; + if (IS_DAX(inode)) + stat->attributes |= STATX_ATTR_DAX; + if (inode->i_op->getattr) return inode->i_op->getattr(path, stat, request_mask, query_flags); @@ -152,7 +156,8 @@ int vfs_statx_fd(unsigned int fd, struct kstat *stat, } EXPORT_SYMBOL(vfs_statx_fd); -inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags) +static inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, + int flags) { if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0) @@ -199,6 +204,11 @@ retry: goto out; error = vfs_getattr(&path, stat, request_mask, flags); + stat->mnt_id = real_mount(path.mnt)->mnt_id; + stat->result_mask |= STATX_MNT_ID; + if (path.mnt->mnt_root == path.dentry) + stat->attributes |= STATX_ATTR_MOUNT_ROOT; + stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT; path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; @@ -533,7 +543,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ -noinline_for_stack int +static noinline_for_stack int cp_statx(const struct kstat *stat, struct statx __user *buffer) { struct statx tmp; @@ -563,10 +573,29 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) tmp.stx_rdev_minor = MINOR(stat->rdev); tmp.stx_dev_major = MAJOR(stat->dev); tmp.stx_dev_minor = MINOR(stat->dev); + tmp.stx_mnt_id = stat->mnt_id; return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } +int do_statx(int dfd, const char __user *filename, unsigned flags, + unsigned int mask, struct statx __user *buffer) +{ + struct kstat stat; + int error; + + if (mask & STATX__RESERVED) + return -EINVAL; + if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) + return -EINVAL; + + error = vfs_statx(dfd, filename, flags, &stat, mask); + if (error) + return error; + + return cp_statx(&stat, buffer); +} + /** * sys_statx - System call to get enhanced stats * @dfd: Base directory to pathwalk from *or* fd to stat. @@ -583,19 +612,7 @@ SYSCALL_DEFINE5(statx, unsigned int, mask, struct statx __user *, buffer) { - struct kstat stat; - int error; - - if (mask & STATX__RESERVED) - return -EINVAL; - if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) - return -EINVAL; - - error = vfs_statx(dfd, filename, flags, &stat, mask); - if (error) - return error; - - return cp_statx(&stat, buffer); + return do_statx(dfd, filename, flags, mask, buffer); } #ifdef CONFIG_COMPAT diff --git a/fs/super.c b/fs/super.c index a288cd60d2ae..bf3b7685b52a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1598,12 +1598,10 @@ int super_setup_bdi_name(struct super_block *sb, char *fmt, ...) 
int err; va_list args; - bdi = bdi_alloc(GFP_KERNEL); + bdi = bdi_alloc(NUMA_NO_NODE); if (!bdi) return -ENOMEM; - bdi->name = sb->s_type->name; - va_start(args, fmt); err = bdi_register_va(bdi, fmt, args); va_end(args); diff --git a/fs/sync.c b/fs/sync.c index 4d1ff010bc5a..c6f6f5be5682 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -161,7 +161,7 @@ SYSCALL_DEFINE1(syncfs, int, fd) { struct fd f = fdget(fd); struct super_block *sb; - int ret; + int ret, ret2; if (!f.file) return -EBADF; @@ -171,8 +171,10 @@ SYSCALL_DEFINE1(syncfs, int, fd) ret = sync_filesystem(sb); up_read(&sb->s_umount); + ret2 = errseq_check_and_advance(&sb->s_wb_err, &f.file->f_sb_err); + fdput(f); - return ret; + return ret ? ret : ret2; } /** diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index aa85f2874a9f..59dffd5ca517 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -6,7 +6,7 @@ * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * - * Please see Documentation/filesystems/sysfs.txt for more information. + * Please see Documentation/filesystems/sysfs.rst for more information. */ #define pr_fmt(fmt) "sysfs: " fmt diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 26bbf960e2a2..eb6897ab78e7 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -6,7 +6,7 @@ * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * - * Please see Documentation/filesystems/sysfs.txt for more information. + * Please see Documentation/filesystems/sysfs.rst for more information. */ #include <linux/module.h> @@ -492,6 +492,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) kernfs_put(kn); return ret; } +EXPORT_SYMBOL_GPL(sysfs_remove_file_self); void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr) { diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index db81cfbab9d6..e747c135c1d1 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -6,7 +6,7 @@ * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * - * Please see Documentation/filesystems/sysfs.txt for more information. + * Please see Documentation/filesystems/sysfs.rst for more information. */ #include <linux/fs.h> diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c index c4deecc80f67..5603530a1a52 100644 --- a/fs/sysfs/symlink.c +++ b/fs/sysfs/symlink.c @@ -6,7 +6,7 @@ * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * - * Please see Documentation/filesystems/sysfs.txt for more information. + * Please see Documentation/filesystems/sysfs.rst for more information. */ #include <linux/fs.h> diff --git a/fs/sysv/Kconfig b/fs/sysv/Kconfig index d4edf7d9ae10..b4e23e03fbeb 100644 --- a/fs/sysv/Kconfig +++ b/fs/sysv/Kconfig @@ -28,7 +28,7 @@ config SYSV_FS tar" or preferably "info tar"). Note also that this option has nothing whatsoever to do with the option "System V IPC". Read about the System V file system in - <file:Documentation/filesystems/sysv-fs.txt>. + <file:Documentation/filesystems/sysv-fs.rst>. Saying Y here will enlarge your kernel by about 27 KB. 
To compile this as a module, choose M here: the module will be called diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c index f985a3fbbb36..cc5c0abfd536 100644 --- a/fs/ubifs/auth.c +++ b/fs/ubifs/auth.c @@ -31,15 +31,9 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node, u8 *hash) { const struct ubifs_ch *ch = node; - SHASH_DESC_ON_STACK(shash, c->hash_tfm); - int err; - - shash->tfm = c->hash_tfm; - err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash); - if (err < 0) - return err; - return 0; + return crypto_shash_tfm_digest(c->hash_tfm, node, le32_to_cpu(ch->len), + hash); } /** @@ -53,15 +47,7 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node, static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash, u8 *hmac) { - SHASH_DESC_ON_STACK(shash, c->hmac_tfm); - int err; - - shash->tfm = c->hmac_tfm; - - err = crypto_shash_digest(shash, hash, c->hash_len, hmac); - if (err < 0) - return err; - return 0; + return crypto_shash_tfm_digest(c->hmac_tfm, hash, c->hash_len, hmac); } /** diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 0f5a480fe264..31288d8fa2ce 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -815,7 +815,7 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum) pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); + buf = __vmalloc(c->leb_size, GFP_NOFS); if (!buf) { ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum); return; diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 29826c51883a..22bfda158f7f 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -1095,7 +1095,7 @@ static int scan_check_cb(struct ubifs_info *c, return LPT_SCAN_CONTINUE; } - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); + buf = __vmalloc(c->leb_size, GFP_NOFS); if (!buf) return -ENOMEM; diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index ff5e0411cf2d..d76a19e460cd 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1596,7 +1596,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) if (!dbg_is_chk_lprops(c)) return 0; - buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); + buf = p = __vmalloc(c->leb_size, GFP_NOFS); if (!buf) { ubifs_err(c, "cannot allocate memory for ltab checking"); return 0; @@ -1845,7 +1845,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) void *buf, *p; pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); - buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); + buf = p = __vmalloc(c->leb_size, GFP_NOFS); if (!buf) { ubifs_err(c, "cannot allocate memory to dump LPT"); return; diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index 52a85c01397e..911d0555b9f2 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c @@ -68,12 +68,9 @@ static int mst_node_check_hash(const struct ubifs_info *c, u8 calc[UBIFS_MAX_HASH_LEN]; const void *node = mst; - SHASH_DESC_ON_STACK(shash, c->hash_tfm); - - shash->tfm = c->hash_tfm; - - crypto_shash_digest(shash, node + sizeof(struct ubifs_ch), - UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch), calc); + crypto_shash_tfm_digest(c->hash_tfm, node + sizeof(struct ubifs_ch), + UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch), + calc); if (ubifs_check_hash(c, expected, calc)) return -EPERM; diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 283f9eb48410..2c294085ffed 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -977,7 +977,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct 
check_info *ci) if (c->no_orphs) return 0; - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); + buf = __vmalloc(c->leb_size, GFP_NOFS); if (!buf) { ubifs_err(c, "cannot allocate memory to check orphans"); return 0; diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 01fcf7975047..b69ffac7e415 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -558,7 +558,7 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud) return data == 0xFFFFFFFF; } -/* authenticate_sleb_hash and authenticate_sleb_hmac are split out for stack usage */ +/* authenticate_sleb_hash is split out for stack usage */ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash) { SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); @@ -569,15 +569,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h return crypto_shash_final(hash_desc, hash); } -static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac) -{ - SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm); - - hmac_desc->tfm = c->hmac_tfm; - - return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac); -} - /** * authenticate_sleb - authenticate one scan LEB * @c: UBIFS file-system description object @@ -618,7 +609,8 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, if (err) goto out; - err = authenticate_sleb_hmac(c, hash, hmac); + err = crypto_shash_tfm_digest(c->hmac_tfm, hash, + c->hash_len, hmac); if (err) goto out; diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig index 6848de581ce1..26e1a49f3ba7 100644 --- a/fs/udf/Kconfig +++ b/fs/udf/Kconfig @@ -9,7 +9,7 @@ config UDF_FS compatible with standard unix file systems, it is also suitable for removable USB disks. Say Y if you intend to mount DVD discs or CDRW's written in packet mode, or if you want to use UDF for removable USB - disks. Please read <file:Documentation/filesystems/udf.txt>. + disks. Please read <file:Documentation/filesystems/udf.rst>. To compile this file system support as a module, choose M here: the module will be called udf. 
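The fs/stat.c hunks earlier in this section make statx() richer as seen from userspace: stx_mnt_id is now filled in (flagged by STATX_MNT_ID in stx_mask), and stx_attributes can carry STATX_ATTR_MOUNT_ROOT and STATX_ATTR_DAX. Below is a minimal userspace sketch, not part of the patch series, assuming a kernel with these changes and a glibc/UAPI combination new enough to expose statx() and the stx_mnt_id field; the fallback constant values mirror the UAPI additions but should be checked against your linux/stat.h.

/*
 * Minimal demo of the new statx() fields: mount ID, mount-root and
 * DAX attribute bits. Requires statx() support in libc and headers
 * that already declare stx_mnt_id (assumption, see note above).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/stat.h>

#ifndef STATX_MNT_ID
#define STATX_MNT_ID		0x00001000U	/* want/got stx_mnt_id */
#endif
#ifndef STATX_ATTR_MOUNT_ROOT
#define STATX_ATTR_MOUNT_ROOT	0x00002000	/* root of a mount */
#endif
#ifndef STATX_ATTR_DAX
#define STATX_ATTR_DAX		0x00200000	/* file is in DAX state */
#endif

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/";
	struct statx stx;

	if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW,
		  STATX_BASIC_STATS | STATX_MNT_ID, &stx)) {
		perror("statx");
		return EXIT_FAILURE;
	}

	if (stx.stx_mask & STATX_MNT_ID)
		printf("mount id:   %llu\n",
		       (unsigned long long)stx.stx_mnt_id);
	printf("mount root: %s\n",
	       (stx.stx_attributes & STATX_ATTR_MOUNT_ROOT) ? "yes" : "no");
	printf("dax:        %s\n",
	       (stx.stx_attributes & STATX_ATTR_DAX) ? "yes" : "no");
	return 0;
}

On a kernel without these hunks the call still succeeds, but STATX_MNT_ID simply never appears in stx_mask and the new attribute bits stay clear.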
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index e875bc5668ee..adaba8e8b326 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -195,10 +195,9 @@ static int udf_readpage(struct file *file, struct page *page) return mpage_readpage(page, udf_get_block); } -static int udf_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void udf_readahead(struct readahead_control *rac) { - return mpage_readpages(mapping, pages, nr_pages, udf_get_block); + mpage_readahead(rac, udf_get_block); } static int udf_write_begin(struct file *file, struct address_space *mapping, @@ -234,7 +233,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations udf_aops = { .readpage = udf_readpage, - .readpages = udf_readpages, + .readahead = udf_readahead, .writepage = udf_writepage, .writepages = udf_writepages, .write_begin = udf_write_begin, diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c index 5c7ec121990d..f1094cdcd6cd 100644 --- a/fs/udf/lowlevel.c +++ b/fs/udf/lowlevel.c @@ -27,41 +27,38 @@ unsigned int udf_get_last_session(struct super_block *sb) { + struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); struct cdrom_multisession ms_info; - unsigned int vol_desc_start; - struct block_device *bdev = sb->s_bdev; - int i; - vol_desc_start = 0; - ms_info.addr_format = CDROM_LBA; - i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info); + if (!cdi) { + udf_debug("CDROMMULTISESSION not supported.\n"); + return 0; + } - if (i == 0) { + ms_info.addr_format = CDROM_LBA; + if (cdrom_multisession(cdi, &ms_info) == 0) { udf_debug("XA disk: %s, vol_desc_start=%d\n", ms_info.xa_flag ? "yes" : "no", ms_info.addr.lba); if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */ - vol_desc_start = ms_info.addr.lba; - } else { - udf_debug("CDROMMULTISESSION not supported: rc=%d\n", i); + return ms_info.addr.lba; } - return vol_desc_start; + return 0; } unsigned long udf_get_last_block(struct super_block *sb) { struct block_device *bdev = sb->s_bdev; + struct cdrom_device_info *cdi = disk_to_cdi(bdev->bd_disk); unsigned long lblock = 0; /* - * ioctl failed or returned obviously bogus value? + * The cdrom layer call failed or returned obviously bogus value? * Try using the device size... 
*/ - if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) || - lblock == 0) + if (!cdi || cdrom_get_last_written(cdi, &lblock) || lblock == 0) lblock = i_size_read(bdev->bd_inode) >> sb->s_blocksize_bits; if (lblock) return lblock - 1; - else - return 0; + return 0; } diff --git a/fs/utimes.c b/fs/utimes.c index 1d17ce98cb80..b7b927502d6e 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -95,13 +95,13 @@ long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, goto out; } - if (flags & ~AT_SYMLINK_NOFOLLOW) + if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) goto out; if (filename == NULL && dfd != AT_FDCWD) { struct fd f; - if (flags & AT_SYMLINK_NOFOLLOW) + if (flags) goto out; f = fdget(dfd); @@ -117,6 +117,8 @@ long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, if (!(flags & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; + if (flags & AT_EMPTY_PATH) + lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) diff --git a/fs/verity/enable.c b/fs/verity/enable.c index d98bea308fd7..5ab3bbec8108 100644 --- a/fs/verity/enable.c +++ b/fs/verity/enable.c @@ -329,6 +329,8 @@ rollback: /** * fsverity_ioctl_enable() - enable verity on a file + * @filp: file to enable verity on + * @uarg: user pointer to fsverity_enable_arg * * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of * Documentation/filesystems/fsverity.rst for the documentation. diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index 74768cf539da..e96d99d5145e 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -61,7 +61,7 @@ struct merkle_tree_params { u64 level_start[FS_VERITY_MAX_LEVELS]; }; -/** +/* * fsverity_info - cached verity metadata for an inode * * When a verity file is first opened, an instance of this struct is allocated @@ -134,7 +134,7 @@ void __init fsverity_check_hash_algs(void); /* init.c */ -extern void __printf(3, 4) __cold +void __printf(3, 4) __cold fsverity_msg(const struct inode *inode, const char *level, const char *fmt, ...); diff --git a/fs/verity/measure.c b/fs/verity/measure.c index 05049b68c745..df409a5682ed 100644 --- a/fs/verity/measure.c +++ b/fs/verity/measure.c @@ -11,6 +11,8 @@ /** * fsverity_ioctl_measure() - get a verity file's measurement + * @filp: file to get measurement of + * @_uarg: user pointer to fsverity_digest * * Retrieve the file measurement that the kernel is enforcing for reads from a * verity file. See the "FS_IOC_MEASURE_VERITY" section of diff --git a/fs/verity/open.c b/fs/verity/open.c index c5fe6948e262..d007db0c9304 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -330,6 +330,7 @@ EXPORT_SYMBOL_GPL(fsverity_prepare_setattr); /** * fsverity_cleanup_inode() - free the inode's verity info, if present + * @inode: an inode being evicted * * Filesystems must call this on inode eviction to free ->i_verity_info. */ diff --git a/fs/verity/signature.c b/fs/verity/signature.c index c8b255232de5..b14ed96387ec 100644 --- a/fs/verity/signature.c +++ b/fs/verity/signature.c @@ -28,6 +28,9 @@ static struct key *fsverity_keyring; /** * fsverity_verify_signature() - check a verity file's signature + * @vi: the file's fsverity_info + * @desc: the file's fsverity_descriptor + * @desc_size: size of @desc * * If the file's fs-verity descriptor includes a signature of the file * measurement, verify it against the certificates in the fs-verity keyring. 
diff --git a/fs/verity/verify.c b/fs/verity/verify.c index e0cb62da3864..a8b68c6f663d 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -179,6 +179,7 @@ out: /** * fsverity_verify_page() - verify a data page + * @page: the page to verity * * Verify a page that has just been read from a verity file. The page must be a * pagecache page that is still locked and not yet uptodate. @@ -206,6 +207,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page); #ifdef CONFIG_BLOCK /** * fsverity_verify_bio() - verify a 'read' bio that has just completed + * @bio: the bio to verify * * Verify a set of pages that have just been read from a verity file. The pages * must be pagecache pages that are still locked and not yet uptodate. Pages @@ -264,6 +266,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_bio); /** * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue + * @work: the work to enqueue * * Enqueue verification work for asynchronous processing. */ diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 4f95df476181..04611a1068b4 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -7,8 +7,6 @@ ccflags-y += -I $(srctree)/$(src) # needed for trace events ccflags-y += -I $(srctree)/$(src)/libxfs -ccflags-$(CONFIG_XFS_DEBUG) += -g - obj-$(CONFIG_XFS_FS) += xfs.o # this one should be compiled first, as the tracing macros can easily blow up @@ -101,9 +99,12 @@ xfs-y += xfs_log.o \ xfs_log_cil.o \ xfs_bmap_item.o \ xfs_buf_item.o \ + xfs_buf_item_recover.o \ + xfs_dquot_item_recover.o \ xfs_extfree_item.o \ xfs_icreate_item.o \ xfs_inode_item.o \ + xfs_inode_item_recover.o \ xfs_refcount_item.o \ xfs_rmap_item.o \ xfs_log_recover.o \ diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index 1da94237a8cf..f1366475c389 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c @@ -48,7 +48,7 @@ __kmem_vmalloc(size_t size, xfs_km_flags_t flags) if (flags & KM_NOFS) nofs_flag = memalloc_nofs_save(); - ptr = __vmalloc(size, lflags, PAGE_KERNEL); + ptr = __vmalloc(size, lflags); if (flags & KM_NOFS) memalloc_nofs_restore(nofs_flag); diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 6143117770e9..34cbcfde9228 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. @@ -19,6 +19,7 @@ typedef unsigned __bitwise xfs_km_flags_t; #define KM_NOFS ((__force xfs_km_flags_t)0x0004u) #define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u) #define KM_ZERO ((__force xfs_km_flags_t)0x0010u) +#define KM_NOLOCKDEP ((__force xfs_km_flags_t)0x0020u) /* * We use a special process flag to avoid recursive callbacks into @@ -30,7 +31,7 @@ kmem_flags_convert(xfs_km_flags_t flags) { gfp_t lflags; - BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO)); + BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP)); lflags = GFP_KERNEL | __GFP_NOWARN; if (flags & KM_NOFS) @@ -49,6 +50,9 @@ kmem_flags_convert(xfs_km_flags_t flags) if (flags & KM_ZERO) lflags |= __GFP_ZERO; + if (flags & KM_NOLOCKDEP) + lflags |= __GFP_NOLOCKDEP; + return lflags; } diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h index c0352edc8e41..f3fd0ee9a7f7 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.h +++ b/fs/xfs/libxfs/xfs_ag_resv.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2016 Oracle. All Rights Reserved. * Author: Darrick J. 
Wong <darrick.wong@oracle.com> diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h index a851bf77f17b..6c22b12176b8 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h index 047f09f0be3c..a5b998e950fe 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.h +++ b/fs/xfs/libxfs/xfs_alloc_btree.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index e4fe3dca9883..3b1bd6e112f8 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -61,8 +61,8 @@ xfs_inode_hasattr( struct xfs_inode *ip) { if (!XFS_IFORK_Q(ip) || - (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && - ip->i_d.di_anextents == 0)) + (ip->i_afp->if_format == XFS_DINODE_FMT_EXTENTS && + ip->i_afp->if_nextents == 0)) return 0; return 1; } @@ -84,7 +84,7 @@ xfs_attr_get_ilocked( if (!xfs_inode_hasattr(args->dp)) return -ENOATTR; - if (args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) + if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) return xfs_attr_shortform_getvalue(args); if (xfs_bmap_one_block(args->dp, XFS_ATTR_FORK)) return xfs_attr_leaf_get(args); @@ -212,14 +212,14 @@ xfs_attr_set_args( * If the attribute list is non-existent or a shortform list, * upgrade it to a single-leaf-block attribute list. */ - if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL || - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && - dp->i_d.di_anextents == 0)) { + if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL || + (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS && + dp->i_afp->if_nextents == 0)) { /* * Build initial attribute list (if required). */ - if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) + if (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS) xfs_attr_shortform_create(args); /* @@ -272,7 +272,7 @@ xfs_attr_remove_args( if (!xfs_inode_hasattr(dp)) { error = -ENOATTR; - } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { + } else if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) { ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); error = xfs_attr_shortform_remove(args); } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h index 0d2d05908537..db4717657ca1 100644 --- a/fs/xfs/libxfs/xfs_attr.h +++ b/fs/xfs/libxfs/xfs_attr.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 863444e2dda7..2f7e89e4be3e 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -309,14 +309,6 @@ xfs_attr3_leaf_verify( return fa; /* - * In recovery there is a transient state where count == 0 is valid - * because we may have transitioned an empty shortform attr to a leaf - * if the attr didn't fit in shortform. - */ - if (!xfs_log_in_recovery(mp) && ichdr.count == 0) - return __this_address; - - /* * firstused is the block offset of the first name info structure. * Make sure it doesn't go off the block or crash into the header. 
*/ @@ -331,6 +323,13 @@ xfs_attr3_leaf_verify( (char *)bp->b_addr + ichdr.firstused) return __this_address; + /* + * NOTE: This verifier historically failed empty leaf buffers because + * we expect the fork to be in another format. Empty attr fork format + * conversions are possible during xattr set, however, and format + * conversion is not atomic with the xattr set that triggers it. We + * cannot assume leaf blocks are non-empty until that is addressed. + */ buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize; for (i = 0, ent = entries; i < ichdr.count; ent++, i++) { fa = xfs_attr3_leaf_verify_entry(mp, buf_end, leaf, &ichdr, @@ -489,7 +488,7 @@ xfs_attr_copy_value( } if (!args->value) { - args->value = kmem_alloc_large(valuelen, 0); + args->value = kmem_alloc_large(valuelen, KM_NOLOCKDEP); if (!args->value) return -ENOMEM; } @@ -539,7 +538,7 @@ xfs_attr_shortform_bytesfit( /* rounded down */ offset = (XFS_LITINO(mp) - bytes) >> 3; - if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) { + if (dp->i_df.if_format == XFS_DINODE_FMT_DEV) { minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; return (offset >= minforkoff) ? minforkoff : 0; } @@ -567,7 +566,7 @@ xfs_attr_shortform_bytesfit( dsize = dp->i_df.if_bytes; - switch (dp->i_d.di_format) { + switch (dp->i_df.if_format) { case XFS_DINODE_FMT_EXTENTS: /* * If there is no attr fork and the data fork is extents, @@ -636,22 +635,19 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp) * Create the initial contents of a shortform attribute list. */ void -xfs_attr_shortform_create(xfs_da_args_t *args) +xfs_attr_shortform_create( + struct xfs_da_args *args) { - xfs_attr_sf_hdr_t *hdr; - xfs_inode_t *dp; - struct xfs_ifork *ifp; + struct xfs_inode *dp = args->dp; + struct xfs_ifork *ifp = dp->i_afp; + struct xfs_attr_sf_hdr *hdr; trace_xfs_attr_sf_create(args); - dp = args->dp; - ASSERT(dp != NULL); - ifp = dp->i_afp; - ASSERT(ifp != NULL); ASSERT(ifp->if_bytes == 0); - if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) { + if (ifp->if_format == XFS_DINODE_FMT_EXTENTS) { ifp->if_flags &= ~XFS_IFEXTENTS; /* just in case */ - dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL; + ifp->if_format = XFS_DINODE_FMT_LOCAL; ifp->if_flags |= XFS_IFINLINE; } else { ASSERT(ifp->if_flags & XFS_IFINLINE); @@ -719,13 +715,12 @@ xfs_attr_fork_remove( struct xfs_inode *ip, struct xfs_trans *tp) { - xfs_idestroy_fork(ip, XFS_ATTR_FORK); - ip->i_d.di_forkoff = 0; - ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; - - ASSERT(ip->i_d.di_anextents == 0); - ASSERT(ip->i_afp == NULL); + ASSERT(ip->i_afp->if_nextents == 0); + xfs_idestroy_fork(ip->i_afp); + kmem_cache_free(xfs_ifork_zone, ip->i_afp); + ip->i_afp = NULL; + ip->i_d.di_forkoff = 0; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } @@ -775,7 +770,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) totsize -= size; if (totsize == sizeof(xfs_attr_sf_hdr_t) && (mp->m_flags & XFS_MOUNT_ATTR2) && - (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && + (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) && !(args->op_flags & XFS_DA_OP_ADDNAME)) { xfs_attr_fork_remove(dp, args->trans); } else { @@ -785,7 +780,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || (args->op_flags & XFS_DA_OP_ADDNAME) || !(mp->m_flags & XFS_MOUNT_ATTR2) || - dp->i_d.di_format == XFS_DINODE_FMT_BTREE); + dp->i_df.if_format == XFS_DINODE_FMT_BTREE); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -962,7 +957,7 @@ xfs_attr_shortform_allfit( + be16_to_cpu(name_loc->valuelen); } if 
((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && - (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && + (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) && (bytes == sizeof(struct xfs_attr_sf_hdr))) return -1; return xfs_attr_shortform_bytesfit(dp, bytes); @@ -981,7 +976,7 @@ xfs_attr_shortform_verify( int i; int64_t size; - ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL); + ASSERT(ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL); ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK); sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data; size = ifp->if_bytes; @@ -1085,7 +1080,7 @@ xfs_attr3_leaf_to_shortform( if (forkoff == -1) { ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); - ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); + ASSERT(dp->i_df.if_format != XFS_DINODE_FMT_BTREE); xfs_attr_fork_remove(dp, args->trans); goto out; } diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index 6dd2d937a42a..5be6be309302 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h index 6fb4572845ce..e1144f22b005 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.h +++ b/fs/xfs/libxfs/xfs_attr_remote.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_attr_sf.h b/fs/xfs/libxfs/xfs_attr_sf.h index aafa4fe70624..bb004fb7944a 100644 --- a/fs/xfs/libxfs/xfs_attr_sf.h +++ b/fs/xfs/libxfs/xfs_attr_sf.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_bit.h b/fs/xfs/libxfs/xfs_bit.h index 99017b8df292..a04f266ae644 100644 --- a/fs/xfs/libxfs/xfs_bit.h +++ b/fs/xfs/libxfs/xfs_bit.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index fda13cd7add0..667cdd0dfdf4 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -61,10 +61,10 @@ xfs_bmap_compute_maxlevels( int sz; /* root block size */ /* - * The maximum number of extents in a file, hence the maximum - * number of leaf entries, is controlled by the type of di_nextents - * (a signed 32-bit number, xfs_extnum_t), or by di_anextents - * (a signed 16-bit number, xfs_aextnum_t). + * The maximum number of extents in a file, hence the maximum number of + * leaf entries, is controlled by the size of the on-disk extent count, + * either a signed 32-bit number for the data fork, or a signed 16-bit + * number for the attr fork. 
* * Note that we can no longer assume that if we are in ATTR1 that * the fork offset of all the inodes will be @@ -120,10 +120,11 @@ xfs_bmbt_lookup_first( */ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) { + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); + return whichfork != XFS_COW_FORK && - XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && - XFS_IFORK_NEXTENTS(ip, whichfork) > - XFS_IFORK_MAXEXT(ip, whichfork); + ifp->if_format == XFS_DINODE_FMT_EXTENTS && + ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork); } /* @@ -131,10 +132,11 @@ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) */ static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) { + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); + return whichfork != XFS_COW_FORK && - XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && - XFS_IFORK_NEXTENTS(ip, whichfork) <= - XFS_IFORK_MAXEXT(ip, whichfork); + ifp->if_format == XFS_DINODE_FMT_BTREE && + ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork); } /* @@ -213,8 +215,8 @@ xfs_bmap_forkoff_reset( int whichfork) { if (whichfork == XFS_ATTR_FORK && - ip->i_d.di_format != XFS_DINODE_FMT_DEV && - ip->i_d.di_format != XFS_DINODE_FMT_BTREE) { + ip->i_df.if_format != XFS_DINODE_FMT_DEV && + ip->i_df.if_format != XFS_DINODE_FMT_BTREE) { uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; if (dfl_forkoff > ip->i_d.di_forkoff) @@ -315,31 +317,28 @@ xfs_bmap_check_leaf_extents( xfs_inode_t *ip, /* incore inode pointer */ int whichfork) /* data or attr fork */ { + struct xfs_mount *mp = ip->i_mount; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_btree_block *block; /* current btree block */ xfs_fsblock_t bno; /* block # of "block" */ xfs_buf_t *bp; /* buffer for "block" */ int error; /* error return value */ xfs_extnum_t i=0, j; /* index into the extents list */ - struct xfs_ifork *ifp; /* fork structure */ int level; /* btree level, for checking */ - xfs_mount_t *mp; /* file system mount structure */ __be64 *pp; /* pointer to block address */ xfs_bmbt_rec_t *ep; /* pointer to current extent */ xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */ xfs_bmbt_rec_t *nextp; /* pointer to next extent */ int bp_release = 0; - if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { + if (ifp->if_format != XFS_DINODE_FMT_BTREE) return; - } /* skip large extent count inodes */ - if (ip->i_d.di_nextents > 10000) + if (ip->i_df.if_nextents > 10000) return; bno = NULLFSBLOCK; - mp = ip->i_mount; - ifp = XFS_IFORK_PTR(ip, whichfork); block = ifp->if_broot; /* * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 
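The rewritten helpers above decide a fork's format purely from the fork-local fields if_format and if_nextents: grow past the number of extents that fit inline and the extent list must be converted to a btree, shrink back under that limit and the btree can collapse to an extent list again. The toy structure and threshold in the sketch below are hypothetical stand-ins used only to show that rule; they are not XFS code.

/*
 * Illustrative userspace model of xfs_bmap_needs_btree() /
 * xfs_bmap_wants_extents() after the conversion to fork-local state.
 * All names and the inline limit are made up for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

enum fork_format { FMT_EXTENTS, FMT_BTREE };

struct fork {
	enum fork_format format;	/* mirrors ifp->if_format */
	unsigned int nextents;		/* mirrors ifp->if_nextents */
	unsigned int maxext;		/* extents that fit inline */
};

/* Grew past the inline limit: the extent list must become a btree. */
static bool needs_btree(const struct fork *f)
{
	return f->format == FMT_EXTENTS && f->nextents > f->maxext;
}

/* Shrunk back under the limit: the btree can collapse again. */
static bool wants_extents(const struct fork *f)
{
	return f->format == FMT_BTREE && f->nextents <= f->maxext;
}

int main(void)
{
	struct fork f = { .format = FMT_EXTENTS, .maxext = 9 };

	for (f.nextents = 8; f.nextents <= 10; f.nextents++)
		printf("nextents=%u needs_btree=%d\n",
		       f.nextents, needs_btree(&f));

	f.format = FMT_BTREE;
	f.nextents = 3;
	printf("nextents=%u wants_extents=%d\n",
	       f.nextents, wants_extents(&f));
	return 0;
}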
@@ -604,7 +603,7 @@ xfs_bmap_btree_to_extents( ASSERT(cur); ASSERT(whichfork != XFS_COW_FORK); ASSERT(ifp->if_flags & XFS_IFEXTENTS); - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); + ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); ASSERT(be16_to_cpu(rblock->bb_level) == 1); ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); @@ -632,7 +631,7 @@ xfs_bmap_btree_to_extents( xfs_iroot_realloc(ip, -1, whichfork); ASSERT(ifp->if_broot == NULL); ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); - XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + ifp->if_format = XFS_DINODE_FMT_EXTENTS; *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork); return 0; } @@ -668,7 +667,7 @@ xfs_bmap_extents_to_btree( mp = ip->i_mount; ASSERT(whichfork != XFS_COW_FORK); ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); + ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS); /* * Make space in the inode incore. This needs to be undone if we fail @@ -692,7 +691,7 @@ xfs_bmap_extents_to_btree( /* * Convert to a btree with two levels, one record in root. */ - XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); + ifp->if_format = XFS_DINODE_FMT_BTREE; memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = mp; @@ -750,7 +749,7 @@ xfs_bmap_extents_to_btree( xfs_bmbt_disk_set_all(arp, &rec); cnt++; } - ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); + ASSERT(cnt == ifp->if_nextents); xfs_btree_set_numrecs(ablock, cnt); /* @@ -778,7 +777,7 @@ out_unreserve_dquot: xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); out_root_realloc: xfs_iroot_realloc(ip, -1, whichfork); - XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + ifp->if_format = XFS_DINODE_FMT_EXTENTS; ASSERT(ifp->if_broot == NULL); xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); @@ -800,16 +799,16 @@ xfs_bmap_local_to_extents_empty( struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(whichfork != XFS_COW_FORK); - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); + ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); ASSERT(ifp->if_bytes == 0); - ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); + ASSERT(ifp->if_nextents == 0); xfs_bmap_forkoff_reset(ip, whichfork); ifp->if_flags &= ~XFS_IFINLINE; ifp->if_flags |= XFS_IFEXTENTS; ifp->if_u1.if_root = NULL; ifp->if_height = 0; - XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + ifp->if_format = XFS_DINODE_FMT_EXTENTS; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } @@ -840,7 +839,7 @@ xfs_bmap_local_to_extents( */ ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK)); ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); + ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); if (!ifp->if_bytes) { xfs_bmap_local_to_extents_empty(tp, ip, whichfork); @@ -907,7 +906,7 @@ xfs_bmap_local_to_extents( xfs_iext_first(ifp, &icur); xfs_iext_insert(ip, &icur, &rec, 0); - XFS_IFORK_NEXT_SET(ip, whichfork, 1); + ifp->if_nextents = 1; ip->i_d.di_nblocks = 1; xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); @@ -972,7 +971,8 @@ xfs_bmap_add_attrfork_extents( xfs_btree_cur_t *cur; /* bmap btree cursor */ int error; /* error return value */ - if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) + if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <= + XFS_IFORK_DSIZE(ip)) return 0; cur = NULL; error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags, @@ -1033,7 +1033,7 @@ 
xfs_bmap_set_attrforkoff( int size, int *version) { - switch (ip->i_d.di_format) { + switch (ip->i_df.if_format) { case XFS_DINODE_FMT_DEV: ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; break; @@ -1091,17 +1091,6 @@ xfs_bmap_add_attrfork( goto trans_cancel; if (XFS_IFORK_Q(ip)) goto trans_cancel; - if (XFS_IS_CORRUPT(mp, ip->i_d.di_anextents != 0)) { - error = -EFSCORRUPTED; - goto trans_cancel; - } - if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { - /* - * For inodes coming from pre-6.2 filesystems. - */ - ASSERT(ip->i_d.di_aformat == 0); - ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; - } xfs_trans_ijoin(tp, ip, 0); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); @@ -1110,9 +1099,10 @@ xfs_bmap_add_attrfork( goto trans_cancel; ASSERT(ip->i_afp == NULL); ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0); + ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS; ip->i_afp->if_flags = XFS_IFEXTENTS; logflags = 0; - switch (ip->i_d.di_format) { + switch (ip->i_df.if_format) { case XFS_DINODE_FMT_LOCAL: error = xfs_bmap_add_attrfork_local(tp, ip, &logflags); break; @@ -1183,13 +1173,13 @@ xfs_iread_bmbt_block( xfs_extnum_t num_recs; xfs_extnum_t j; int whichfork = cur->bc_ino.whichfork; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); block = xfs_btree_get_block(cur, level, &bp); /* Abort if we find more records than nextents. */ num_recs = xfs_btree_get_numrecs(block); - if (unlikely(ir->loaded + num_recs > - XFS_IFORK_NEXTENTS(ip, whichfork))) { + if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) { xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).", (unsigned long long)ip->i_ino); xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block, @@ -1215,7 +1205,7 @@ xfs_iread_bmbt_block( xfs_bmap_fork_to_state(whichfork)); trace_xfs_read_extent(ip, &ir->icur, xfs_bmap_fork_to_state(whichfork), _THIS_IP_); - xfs_iext_next(XFS_IFORK_PTR(ip, whichfork), &ir->icur); + xfs_iext_next(ifp, &ir->icur); } return 0; @@ -1238,9 +1228,7 @@ xfs_iread_extents( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - if (XFS_IS_CORRUPT(mp, - XFS_IFORK_FORMAT(ip, whichfork) != - XFS_DINODE_FMT_BTREE)) { + if (XFS_IS_CORRUPT(mp, ifp->if_format != XFS_DINODE_FMT_BTREE)) { error = -EFSCORRUPTED; goto out; } @@ -1254,8 +1242,7 @@ xfs_iread_extents( if (error) goto out; - if (XFS_IS_CORRUPT(mp, - ir.loaded != XFS_IFORK_NEXTENTS(ip, whichfork))) { + if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) { error = -EFSCORRUPTED; goto out; } @@ -1289,14 +1276,13 @@ xfs_bmap_first_unused( xfs_fileoff_t lowest, max; int error; - ASSERT(xfs_ifork_has_extents(ip, whichfork) || - XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); - - if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + if (ifp->if_format == XFS_DINODE_FMT_LOCAL) { *first_unused = 0; return 0; } + ASSERT(xfs_ifork_has_extents(ifp)); + if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(tp, ip, whichfork); if (error) @@ -1337,7 +1323,7 @@ xfs_bmap_last_before( struct xfs_iext_cursor icur; int error; - switch (XFS_IFORK_FORMAT(ip, whichfork)) { + switch (ifp->if_format) { case XFS_DINODE_FMT_LOCAL: *last_block = 0; return 0; @@ -1436,16 +1422,17 @@ xfs_bmap_last_offset( xfs_fileoff_t *last_block, int whichfork) { + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_bmbt_irec rec; int is_empty; int error; *last_block = 0; - if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) + if (ifp->if_format == XFS_DINODE_FMT_LOCAL) return 0; - if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ip, 
whichfork))) + if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) return -EFSCORRUPTED; error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); @@ -1463,23 +1450,22 @@ xfs_bmap_last_offset( */ int /* 1=>1 block, 0=>otherwise */ xfs_bmap_one_block( - xfs_inode_t *ip, /* incore inode */ - int whichfork) /* data or attr fork */ + struct xfs_inode *ip, /* incore inode */ + int whichfork) /* data or attr fork */ { - struct xfs_ifork *ifp; /* inode fork pointer */ - int rval; /* return value */ - xfs_bmbt_irec_t s; /* internal version of extent */ + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); + int rval; /* return value */ + struct xfs_bmbt_irec s; /* internal version of extent */ struct xfs_iext_cursor icur; #ifndef DEBUG if (whichfork == XFS_DATA_FORK) return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize; #endif /* !DEBUG */ - if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) + if (ifp->if_nextents != 1) return 0; - if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) return 0; - ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); xfs_iext_first(ifp, &icur); xfs_iext_get_extent(ifp, &icur, &s); @@ -1501,10 +1487,11 @@ xfs_bmap_add_extent_delay_real( struct xfs_bmalloca *bma, int whichfork) { + struct xfs_mount *mp = bma->ip->i_mount; + struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); struct xfs_bmbt_irec *new = &bma->got; int error; /* error return value */ int i; /* temp state */ - struct xfs_ifork *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ @@ -1514,16 +1501,9 @@ xfs_bmap_add_extent_delay_real( xfs_filblks_t da_old; /* old count del alloc blocks used */ xfs_filblks_t temp=0; /* value for da_new calculations */ int tmp_rval; /* partial logging flags */ - struct xfs_mount *mp; - xfs_extnum_t *nextents; struct xfs_bmbt_irec old; - mp = bma->ip->i_mount; - ifp = XFS_IFORK_PTR(bma->ip, whichfork); ASSERT(whichfork != XFS_ATTR_FORK); - nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents : - &bma->ip->i_d.di_nextents); - ASSERT(!isnullstartblock(new->br_startblock)); ASSERT(!bma->cur || (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL)); @@ -1614,7 +1594,7 @@ xfs_bmap_add_extent_delay_real( xfs_iext_remove(bma->ip, &bma->icur, state); xfs_iext_prev(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT); - (*nextents)--; + ifp->if_nextents--; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1718,8 +1698,8 @@ xfs_bmap_add_extent_delay_real( PREV.br_startblock = new->br_startblock; PREV.br_state = new->br_state; xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); + ifp->if_nextents++; - (*nextents)++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -1784,7 +1764,8 @@ xfs_bmap_add_extent_delay_real( * The left neighbor is not contiguous. */ xfs_iext_update_extent(bma->ip, state, &bma->icur, new); - (*nextents)++; + ifp->if_nextents++; + if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -1870,7 +1851,8 @@ xfs_bmap_add_extent_delay_real( * The right neighbor is not contiguous. 
*/ xfs_iext_update_extent(bma->ip, state, &bma->icur, new); - (*nextents)++; + ifp->if_nextents++; + if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -1955,7 +1937,7 @@ xfs_bmap_add_extent_delay_real( xfs_iext_next(ifp, &bma->icur); xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); - (*nextents)++; + ifp->if_nextents++; if (bma->cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -2159,8 +2141,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &LEFT); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) - 2); + ifp->if_nextents -= 2; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -2212,8 +2193,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &LEFT); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + ifp->if_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -2255,9 +2235,8 @@ xfs_bmap_add_extent_unwritten_real( xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &PREV); + ifp->if_nextents--; - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) - 1); if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -2364,8 +2343,8 @@ xfs_bmap_add_extent_unwritten_real( xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_insert(ip, icur, new, state); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + ifp->if_nextents++; + if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -2440,9 +2419,8 @@ xfs_bmap_add_extent_unwritten_real( xfs_iext_update_extent(ip, state, icur, &PREV); xfs_iext_next(ifp, icur); xfs_iext_insert(ip, icur, new, state); + ifp->if_nextents++; - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) + 1); if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -2493,9 +2471,8 @@ xfs_bmap_add_extent_unwritten_real( xfs_iext_next(ifp, icur); xfs_iext_insert(ip, icur, &r[1], state); xfs_iext_insert(ip, icur, &r[0], state); + ifp->if_nextents += 2; - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) + 2); if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; else { @@ -2810,9 +2787,8 @@ xfs_bmap_add_extent_hole_real( xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &left); + ifp->if_nextents--; - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) - 1); if (cur == NULL) { rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); } else { @@ -2910,8 +2886,8 @@ xfs_bmap_add_extent_hole_real( * Insert a new entry. 
*/ xfs_iext_insert(ip, icur, new, state); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + ifp->if_nextents++; + if (cur == NULL) { rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); } else { @@ -3891,7 +3867,8 @@ xfs_bmapi_read( int flags) { struct xfs_mount *mp = ip->i_mount; - struct xfs_ifork *ifp; + int whichfork = xfs_bmapi_whichfork(flags); + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_bmbt_irec got; xfs_fileoff_t obno; xfs_fileoff_t end; @@ -3899,48 +3876,23 @@ xfs_bmapi_read( int error; bool eof = false; int n = 0; - int whichfork = xfs_bmapi_whichfork(flags); ASSERT(*nmap >= 1); - ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| - XFS_BMAPI_COWFORK))); + ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE))); ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) || - XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { + if (WARN_ON_ONCE(!ifp)) + return -EFSCORRUPTED; + + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || + XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) return -EFSCORRUPTED; - } if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; XFS_STATS_INC(mp, xs_blk_mapr); - ifp = XFS_IFORK_PTR(ip, whichfork); - if (!ifp) { - /* No CoW fork? Return a hole. */ - if (whichfork == XFS_COW_FORK) { - mval->br_startoff = bno; - mval->br_startblock = HOLESTARTBLOCK; - mval->br_blockcount = len; - mval->br_state = XFS_EXT_NORM; - *nmap = 1; - return 0; - } - - /* - * A missing attr ifork implies that the inode says we're in - * extents or btree format but failed to pass the inode fork - * verifier while trying to load it. Treat that as a file - * corruption too. - */ -#ifdef DEBUG - xfs_alert(mp, "%s: inode %llu missing fork %d", - __func__, ip->i_ino, whichfork); -#endif /* DEBUG */ - return -EFSCORRUPTED; - } - if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(NULL, ip, whichfork); if (error) @@ -4193,17 +4145,7 @@ xfs_bmapi_allocate( bma->got.br_blockcount = bma->length; bma->got.br_state = XFS_EXT_NORM; - /* - * In the data fork, a wasdelay extent has been initialized, so - * shouldn't be flagged as unwritten. - * - * For the cow fork, however, we convert delalloc reservations - * (extents allocated for speculative preallocation) to - * allocated unwritten extents, and only convert the unwritten - * extents to real extents when we're about to write the data. 
- */ - if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && - (bma->flags & XFS_BMAPI_PREALLOC)) + if (bma->flags & XFS_BMAPI_PREALLOC) bma->got.br_state = XFS_EXT_UNWRITTEN; if (bma->wasdel) @@ -4317,11 +4259,13 @@ xfs_bmapi_minleft( struct xfs_inode *ip, int fork) { + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, fork); + if (tp && tp->t_firstblock != NULLFSBLOCK) return 0; - if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE) + if (ifp->if_format != XFS_DINODE_FMT_BTREE) return 1; - return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1; + return be16_to_cpu(ifp->if_broot->bb_level) + 1; } /* @@ -4336,11 +4280,13 @@ xfs_bmapi_finish( int whichfork, int error) { + struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); + if ((bma->logflags & xfs_ilog_fext(whichfork)) && - XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + ifp->if_format != XFS_DINODE_FMT_EXTENTS) bma->logflags &= ~xfs_ilog_fext(whichfork); else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) && - XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE) + ifp->if_format != XFS_DINODE_FMT_BTREE) bma->logflags &= ~xfs_ilog_fbroot(whichfork); if (bma->logflags) @@ -4372,13 +4318,13 @@ xfs_bmapi_write( .total = total, }; struct xfs_mount *mp = ip->i_mount; - struct xfs_ifork *ifp; + int whichfork = xfs_bmapi_whichfork(flags); + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); xfs_fileoff_t end; /* end of mapped file region */ bool eof = false; /* after the end of extents */ int error; /* error return */ int n; /* current extent index */ xfs_fileoff_t obno; /* old block number (offset) */ - int whichfork; /* data or attr fork */ #ifdef DEBUG xfs_fileoff_t orig_bno; /* original block number value */ @@ -4393,13 +4339,12 @@ xfs_bmapi_write( orig_mval = mval; orig_nmap = *nmap; #endif - whichfork = xfs_bmapi_whichfork(flags); ASSERT(*nmap >= 1); ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); ASSERT(tp != NULL); ASSERT(len > 0); - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); + ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(!(flags & XFS_BMAPI_REMAP)); @@ -4415,7 +4360,7 @@ xfs_bmapi_write( ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) || + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { return -EFSCORRUPTED; } @@ -4423,8 +4368,6 @@ xfs_bmapi_write( if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; - ifp = XFS_IFORK_PTR(ip, whichfork); - XFS_STATS_INC(mp, xs_blk_mapw); if (!(ifp->if_flags & XFS_IFEXTENTS)) { @@ -4534,9 +4477,8 @@ xfs_bmapi_write( if (error) goto error0; - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || - XFS_IFORK_NEXTENTS(ip, whichfork) > - XFS_IFORK_MAXEXT(ip, whichfork)); + ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE || + ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork)); xfs_bmapi_finish(&bma, whichfork, 0); xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, orig_nmap, *nmap); @@ -4611,8 +4553,23 @@ xfs_bmapi_convert_delalloc( bma.offset = bma.got.br_startoff; bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN); bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); + + /* + * When we're converting the delalloc reservations backing dirty pages + * in the page cache, we must be careful about how we create the new + * extents: + * + * New CoW fork extents are created unwritten, turned into real 
extents + * when we're about to write the data to disk, and mapped into the data + * fork after the write finishes. End of story. + * + * New data fork extents must be mapped in as unwritten and converted + * to real extents after the write succeeds to avoid exposing stale + * disk contents if we crash. + */ + bma.flags = XFS_BMAPI_PREALLOC; if (whichfork == XFS_COW_FORK) - bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC; + bma.flags |= XFS_BMAPI_COWFORK; if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) bma.prev.br_startoff = NULLFILEOFF; @@ -4682,7 +4639,7 @@ xfs_bmapi_remap( ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) || + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { return -EFSCORRUPTED; } @@ -4726,9 +4683,9 @@ xfs_bmapi_remap( error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); error0: - if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) + if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS) logflags &= ~XFS_ILOG_DEXT; - else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) + else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE) logflags &= ~XFS_ILOG_DBROOT; if (logflags) @@ -5078,9 +5035,8 @@ xfs_bmap_del_extent_real( * conversion to btree format, since the transaction will be dirty then. */ if (tp->t_blk_res == 0 && - XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && - XFS_IFORK_NEXTENTS(ip, whichfork) >= - XFS_IFORK_MAXEXT(ip, whichfork) && + ifp->if_format == XFS_DINODE_FMT_EXTENTS && + ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) && del->br_startoff > got.br_startoff && del_endoff < got_endoff) return -ENOSPC; @@ -5132,8 +5088,8 @@ xfs_bmap_del_extent_real( */ xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + ifp->if_nextents--; + flags |= XFS_ILOG_CORE; if (!cur) { flags |= xfs_ilog_fext(whichfork); @@ -5241,8 +5197,8 @@ xfs_bmap_del_extent_real( } } else flags |= xfs_ilog_fext(whichfork); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + + ifp->if_nextents++; xfs_iext_next(ifp, icur); xfs_iext_insert(ip, icur, &new, state); break; @@ -5322,7 +5278,7 @@ __xfs_bunmapi( whichfork = xfs_bmapi_whichfork(flags); ASSERT(whichfork != XFS_COW_FORK); ifp = XFS_IFORK_PTR(ip, whichfork); - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork))) + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) return -EFSCORRUPTED; if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; @@ -5360,7 +5316,7 @@ __xfs_bunmapi( logflags = 0; if (ifp->if_flags & XFS_IFBROOT) { - ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); + ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); cur->bc_ino.flags = 0; } else @@ -5605,10 +5561,10 @@ error0: * logging the extent records if we've converted to btree format. 
*/ if ((logflags & xfs_ilog_fext(whichfork)) && - XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + ifp->if_format != XFS_DINODE_FMT_EXTENTS) logflags &= ~xfs_ilog_fext(whichfork); else if ((logflags & xfs_ilog_fbroot(whichfork)) && - XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) + ifp->if_format != XFS_DINODE_FMT_BTREE) logflags &= ~xfs_ilog_fbroot(whichfork); /* * Log inode even in the error case, if the transaction @@ -5690,6 +5646,7 @@ xfs_bmse_merge( struct xfs_btree_cur *cur, int *logflags) /* output */ { + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_bmbt_irec new; xfs_filblks_t blockcount; int error, i; @@ -5708,8 +5665,7 @@ xfs_bmse_merge( * Update the on-disk extent count, the btree if necessary and log the * inode. */ - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + ifp->if_nextents--; *logflags |= XFS_ILOG_CORE; if (!cur) { *logflags |= XFS_ILOG_DEXT; @@ -5747,7 +5703,7 @@ xfs_bmse_merge( done: xfs_iext_remove(ip, icur, 0); - xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); + xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, &new); @@ -5819,7 +5775,7 @@ xfs_bmap_collapse_extents( int error = 0; int logflags = 0; - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) || + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { return -EFSCORRUPTED; } @@ -5936,7 +5892,7 @@ xfs_bmap_insert_extents( int error = 0; int logflags = 0; - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) || + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { return -EFSCORRUPTED; } @@ -6030,18 +5986,18 @@ xfs_bmap_split_extent( xfs_fileoff_t split_fsb) { int whichfork = XFS_DATA_FORK; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_btree_cur *cur = NULL; struct xfs_bmbt_irec got; struct xfs_bmbt_irec new; /* split extent */ struct xfs_mount *mp = ip->i_mount; - struct xfs_ifork *ifp; xfs_fsblock_t gotblkcnt; /* new block count for got */ struct xfs_iext_cursor icur; int error = 0; int logflags = 0; int i = 0; - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) || + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { return -EFSCORRUPTED; } @@ -6049,7 +6005,6 @@ xfs_bmap_split_extent( if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; - ifp = XFS_IFORK_PTR(ip, whichfork); if (!(ifp->if_flags & XFS_IFEXTENTS)) { /* Read in all the extents */ error = xfs_iread_extents(tp, ip, whichfork); @@ -6097,8 +6052,7 @@ xfs_bmap_split_extent( /* Add new extent */ xfs_iext_next(ifp, &icur); xfs_iext_insert(ip, &icur, &new, 0); - XFS_IFORK_NEXT_SET(ip, whichfork, - XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + ifp->if_nextents++; if (cur) { error = xfs_bmbt_lookup_eq(cur, &new, &i); diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index f3259ad5c22c..6028a3c825ba 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. 
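The xfs_bmap.c and xfs_bmap.h changes above are dominated by one mechanical conversion: the whichfork-switching accessor macros (XFS_IFORK_FORMAT, XFS_IFORK_FMT_SET, XFS_IFORK_NEXTENTS, XFS_IFORK_NEXT_SET) give way to direct reads and writes of the new if_format and if_nextents fields in struct xfs_ifork, so each function looks the fork up once with XFS_IFORK_PTR() and then operates on ifp alone. Because the CoW fork now carries its own counters, the separate i_cformat/i_cnextents plumbing also disappears. The snippet below is a self-contained sketch of that access pattern using simplified stand-in types and names; the real helpers (xfs_ifork_format, xfs_ifork_nextents, xfs_ifork_has_extents) appear in the xfs_inode_fork.h hunk further down in this diff.

/* Standalone illustration only -- simplified stand-ins for the kernel types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { FMT_LOCAL, FMT_EXTENTS, FMT_BTREE };     /* models XFS_DINODE_FMT_* */

struct ifork {
        int8_t   if_format;     /* per-fork format, was di_format/di_aformat/i_cformat */
        uint32_t if_nextents;   /* per-fork extent count, was di_nextents/di_anextents */
};

struct inode_stub {
        struct ifork  i_df;     /* data fork, embedded in the inode */
        struct ifork *i_afp;    /* attr fork, may be absent */
};

/* A missing fork reads as an empty extents-format fork, so callers no longer
 * need a whichfork switch; this mirrors the new inline helpers. */
static int8_t fork_format(const struct ifork *ifp)
{
        return ifp ? ifp->if_format : FMT_EXTENTS;
}

static uint32_t fork_nextents(const struct ifork *ifp)
{
        return ifp ? ifp->if_nextents : 0;
}

static bool fork_has_extents(const struct ifork *ifp)
{
        return ifp->if_format == FMT_EXTENTS || ifp->if_format == FMT_BTREE;
}

int main(void)
{
        struct inode_stub ip = { .i_df = { FMT_EXTENTS, 3 }, .i_afp = NULL };
        struct ifork *ifp = &ip.i_df;   /* look the fork up once, as the patch does */

        ifp->if_nextents++;             /* replaces XFS_IFORK_NEXT_SET(ip, fork, n + 1) */

        printf("data fork: fmt=%d nextents=%u has_extents=%d\n",
               fork_format(ifp), fork_nextents(ifp), fork_has_extents(ifp));
        printf("attr fork: fmt=%d nextents=%u\n",
               fork_format(ip.i_afp), fork_nextents(ip.i_afp));
        return 0;
}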
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index 295a59cf8840..d9c63f17d2de 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -636,10 +636,7 @@ xfs_bmbt_change_owner( ASSERT(tp || buffer_list); ASSERT(!(tp && buffer_list)); - if (whichfork == XFS_DATA_FORK) - ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE); - else - ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE); + ASSERT(XFS_IFORK_PTR(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE); cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork); if (!cur) diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h index 29b407d053b4..72bf74c79fb9 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.h +++ b/fs/xfs/libxfs/xfs_bmap_btree.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2002-2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h index 8626c5a81aad..10e50cbacacf 100644 --- a/fs/xfs/libxfs/xfs_btree.h +++ b/fs/xfs/libxfs/xfs_btree.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h index 53e503b6f186..6e25de6621e4 100644 --- a/fs/xfs/libxfs/xfs_da_btree.h +++ b/fs/xfs/libxfs/xfs_da_btree.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h index 08c0a4d98b89..059ac108b1b3 100644 --- a/fs/xfs/libxfs/xfs_da_format.h +++ b/fs/xfs/libxfs/xfs_da_format.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. 
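The header-only hunks here (xfs_bmap.h, xfs_bmap_btree.h, xfs_btree.h, xfs_da_btree.h, xfs_da_format.h, and the similar ones for xfs_defer.h, xfs_dir2.h, xfs_errortag.h, xfs_format.h, xfs_fs.h and xfs_health.h below) are purely mechanical: per the kernel's SPDX convention in Documentation/process/license-rules.rst, .c files carry the license tag as a C++-style comment while .h files use a classic C comment, since headers can be pulled into contexts where // is not accepted. So the first line of each header changes from

// SPDX-License-Identifier: GPL-2.0

to

/* SPDX-License-Identifier: GPL-2.0 */

with no functional effect.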
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index 22557527cfdb..d8f586256add 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c @@ -178,6 +178,18 @@ static const struct xfs_defer_op_type *defer_op_types[] = { [XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type, }; +static void +xfs_defer_create_intent( + struct xfs_trans *tp, + struct xfs_defer_pending *dfp, + bool sort) +{ + const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type]; + + dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work, + dfp->dfp_count, sort); +} + /* * For each pending item in the intake list, log its intent item and the * associated extents, then add the entire intake list to the end of @@ -187,17 +199,11 @@ STATIC void xfs_defer_create_intents( struct xfs_trans *tp) { - struct list_head *li; struct xfs_defer_pending *dfp; - const struct xfs_defer_op_type *ops; list_for_each_entry(dfp, &tp->t_dfops, dfp_list) { - ops = defer_op_types[dfp->dfp_type]; - dfp->dfp_intent = ops->create_intent(tp, dfp->dfp_count); trace_xfs_defer_create_intent(tp->t_mountp, dfp); - list_sort(tp->t_mountp, &dfp->dfp_work, ops->diff_items); - list_for_each(li, &dfp->dfp_work) - ops->log_item(tp, dfp->dfp_intent, li); + xfs_defer_create_intent(tp, dfp, true); } } @@ -234,10 +240,13 @@ xfs_defer_trans_roll( struct xfs_log_item *lip; struct xfs_buf *bplist[XFS_DEFER_OPS_NR_BUFS]; struct xfs_inode *iplist[XFS_DEFER_OPS_NR_INODES]; + unsigned int ordered = 0; /* bitmap */ int bpcount = 0, ipcount = 0; int i; int error; + BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS); + list_for_each_entry(lip, &tp->t_items, li_trans) { switch (lip->li_type) { case XFS_LI_BUF: @@ -248,7 +257,10 @@ xfs_defer_trans_roll( ASSERT(0); return -EFSCORRUPTED; } - xfs_trans_dirty_buf(tp, bli->bli_buf); + if (bli->bli_flags & XFS_BLI_ORDERED) + ordered |= (1U << bpcount); + else + xfs_trans_dirty_buf(tp, bli->bli_buf); bplist[bpcount++] = bli->bli_buf; } break; @@ -289,6 +301,8 @@ xfs_defer_trans_roll( /* Rejoin the buffers and dirty them so the log moves forward. */ for (i = 0; i < bpcount; i++) { xfs_trans_bjoin(tp, bplist[i]); + if (ordered & (1U << i)) + xfs_trans_ordered_buf(tp, bplist[i]); xfs_trans_bhold(tp, bplist[i]); } @@ -346,6 +360,53 @@ xfs_defer_cancel_list( } /* + * Log an intent-done item for the first pending intent, and finish the work + * items. + */ +static int +xfs_defer_finish_one( + struct xfs_trans *tp, + struct xfs_defer_pending *dfp) +{ + const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type]; + struct xfs_btree_cur *state = NULL; + struct list_head *li, *n; + int error; + + trace_xfs_defer_pending_finish(tp->t_mountp, dfp); + + dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count); + list_for_each_safe(li, n, &dfp->dfp_work) { + list_del(li); + dfp->dfp_count--; + error = ops->finish_item(tp, dfp->dfp_done, li, &state); + if (error == -EAGAIN) { + /* + * Caller wants a fresh transaction; put the work item + * back on the list and log a new log intent item to + * replace the old one. See "Requesting a Fresh + * Transaction while Finishing Deferred Work" above. + */ + list_add(li, &dfp->dfp_work); + dfp->dfp_count++; + dfp->dfp_done = NULL; + xfs_defer_create_intent(tp, dfp, false); + } + + if (error) + goto out; + } + + /* Done with the dfp, free it. */ + list_del(&dfp->dfp_list); + kmem_free(dfp); +out: + if (ops->finish_cleanup) + ops->finish_cleanup(tp, state, error); + return error; +} + +/* * Finish all the pending work. 
This involves logging intent items for * any work items that wandered in since the last transaction roll (if * one has even happened), rolling the transaction, and finishing the @@ -358,11 +419,7 @@ xfs_defer_finish_noroll( struct xfs_trans **tp) { struct xfs_defer_pending *dfp; - struct list_head *li; - struct list_head *n; - void *state; int error = 0; - const struct xfs_defer_op_type *ops; LIST_HEAD(dop_pending); ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); @@ -371,87 +428,30 @@ xfs_defer_finish_noroll( /* Until we run out of pending work to finish... */ while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) { - /* log intents and pull in intake items */ xfs_defer_create_intents(*tp); list_splice_tail_init(&(*tp)->t_dfops, &dop_pending); - /* - * Roll the transaction. - */ error = xfs_defer_trans_roll(tp); if (error) - goto out; + goto out_shutdown; - /* Log an intent-done item for the first pending item. */ dfp = list_first_entry(&dop_pending, struct xfs_defer_pending, dfp_list); - ops = defer_op_types[dfp->dfp_type]; - trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp); - dfp->dfp_done = ops->create_done(*tp, dfp->dfp_intent, - dfp->dfp_count); - - /* Finish the work items. */ - state = NULL; - list_for_each_safe(li, n, &dfp->dfp_work) { - list_del(li); - dfp->dfp_count--; - error = ops->finish_item(*tp, li, dfp->dfp_done, - &state); - if (error == -EAGAIN) { - /* - * Caller wants a fresh transaction; - * put the work item back on the list - * and jump out. - */ - list_add(li, &dfp->dfp_work); - dfp->dfp_count++; - break; - } else if (error) { - /* - * Clean up after ourselves and jump out. - * xfs_defer_cancel will take care of freeing - * all these lists and stuff. - */ - if (ops->finish_cleanup) - ops->finish_cleanup(*tp, state, error); - goto out; - } - } - if (error == -EAGAIN) { - /* - * Caller wants a fresh transaction, so log a - * new log intent item to replace the old one - * and roll the transaction. See "Requesting - * a Fresh Transaction while Finishing - * Deferred Work" above. - */ - dfp->dfp_intent = ops->create_intent(*tp, - dfp->dfp_count); - dfp->dfp_done = NULL; - list_for_each(li, &dfp->dfp_work) - ops->log_item(*tp, dfp->dfp_intent, li); - } else { - /* Done with the dfp, free it. */ - list_del(&dfp->dfp_list); - kmem_free(dfp); - } - - if (ops->finish_cleanup) - ops->finish_cleanup(*tp, state, error); - } - -out: - if (error) { - xfs_defer_trans_abort(*tp, &dop_pending); - xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE); - trace_xfs_defer_finish_error(*tp, error); - xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending); - xfs_defer_cancel(*tp); - return error; + error = xfs_defer_finish_one(*tp, dfp); + if (error && error != -EAGAIN) + goto out_shutdown; } trace_xfs_defer_finish_done(*tp, _RET_IP_); return 0; + +out_shutdown: + xfs_defer_trans_abort(*tp, &dop_pending); + xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE); + trace_xfs_defer_finish_error(*tp, error); + xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending); + xfs_defer_cancel(*tp); + return error; } int diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h index 7c28d7608ac6..6b2ca580f2b0 100644 --- a/fs/xfs/libxfs/xfs_defer.h +++ b/fs/xfs/libxfs/xfs_defer.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2016 Oracle. All Rights Reserved. * Author: Darrick J. 
Wong <darrick.wong@oracle.com> @@ -6,6 +6,7 @@ #ifndef __XFS_DEFER_H__ #define __XFS_DEFER_H__ +struct xfs_btree_cur; struct xfs_defer_op_type; /* @@ -28,8 +29,8 @@ enum xfs_defer_ops_type { struct xfs_defer_pending { struct list_head dfp_list; /* pending items */ struct list_head dfp_work; /* work items */ - void *dfp_intent; /* log intent item */ - void *dfp_done; /* log done item */ + struct xfs_log_item *dfp_intent; /* log intent item */ + struct xfs_log_item *dfp_done; /* log done item */ unsigned int dfp_count; /* # extent items */ enum xfs_defer_ops_type dfp_type; }; @@ -43,15 +44,16 @@ void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp); /* Description of a deferred type. */ struct xfs_defer_op_type { - void (*abort_intent)(void *); - void *(*create_done)(struct xfs_trans *, void *, unsigned int); - int (*finish_item)(struct xfs_trans *, struct list_head *, void *, - void **); - void (*finish_cleanup)(struct xfs_trans *, void *, int); - void (*cancel_item)(struct list_head *); - int (*diff_items)(void *, struct list_head *, struct list_head *); - void *(*create_intent)(struct xfs_trans *, uint); - void (*log_item)(struct xfs_trans *, void *, struct list_head *); + struct xfs_log_item *(*create_intent)(struct xfs_trans *tp, + struct list_head *items, unsigned int count, bool sort); + void (*abort_intent)(struct xfs_log_item *intent); + struct xfs_log_item *(*create_done)(struct xfs_trans *tp, + struct xfs_log_item *intent, unsigned int count); + int (*finish_item)(struct xfs_trans *tp, struct xfs_log_item *done, + struct list_head *item, struct xfs_btree_cur **state); + void (*finish_cleanup)(struct xfs_trans *tp, + struct xfs_btree_cur *state, int error); + void (*cancel_item)(struct list_head *item); unsigned int max_items; }; diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c index dd6fcaaea318..612a9c5e41b1 100644 --- a/fs/xfs/libxfs/xfs_dir2.c +++ b/fs/xfs/libxfs/xfs_dir2.c @@ -278,7 +278,7 @@ xfs_dir_createname( if (!inum) args->op_flags |= XFS_DA_OP_JUSTCHECK; - if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) { rval = xfs_dir2_sf_addname(args); goto out_free; } @@ -373,7 +373,7 @@ xfs_dir_lookup( args->op_flags |= XFS_DA_OP_CILOOKUP; lock_mode = xfs_ilock_data_map_shared(dp); - if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) { rval = xfs_dir2_sf_lookup(args); goto out_check_rval; } @@ -443,7 +443,7 @@ xfs_dir_removename( args->whichfork = XFS_DATA_FORK; args->trans = tp; - if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) { rval = xfs_dir2_sf_removename(args); goto out_free; } @@ -504,7 +504,7 @@ xfs_dir_replace( args->whichfork = XFS_DATA_FORK; args->trans = tp; - if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) { rval = xfs_dir2_sf_replace(args); goto out_free; } diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index 033777e282f2..e55378640b05 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. * All Rights Reserved. 
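Tying the xfs_defer.h hunk above back to the earlier xfs_defer.c changes: dfp_intent and dfp_done become typed struct xfs_log_item pointers, the op-type callbacks gain real prototypes (create_intent now logs and sorts the work items itself, so the separate diff_items/log_item hooks go away), and the per-item finishing loop is factored into xfs_defer_finish_one(), which requeues the work item and logs a replacement intent when ->finish_item() returns -EAGAIN so the caller can roll onto a fresh transaction. The toy program below models only that -EAGAIN control flow with simplified stand-in types; it is an illustration of the idea, not the kernel interfaces.

/* Toy model of the xfs_defer_finish_one() retry flow; everything here is a
 * simplified stand-in, not the real XFS API. */
#include <errno.h>
#include <stdio.h>

#define NWORK 3

struct pending {
        int work[NWORK];        /* stand-in for the dfp_work list */
        int count;              /* dfp_count */
        int intent;             /* dfp_intent: id of the currently logged intent */
        int done;               /* dfp_done: nonzero once a done item is logged */
};

/* Stand-in for ops->finish_item(): fail the second item once with -EAGAIN to
 * exercise the "fresh transaction" path described in the patch comment. */
static int finish_item(int item, int *state)
{
        static int tripped;

        if (item == 2 && !tripped++)
                return -EAGAIN;
        *state += item;         /* pretend to advance shared btree cursor state */
        return 0;
}

static int defer_finish_one(struct pending *dfp)
{
        int state = 0;
        int error = 0;

        dfp->done = 1;                          /* ops->create_done(...) */
        while (dfp->count) {
                int item = dfp->work[0];
                int i;

                for (i = 1; i < dfp->count; i++)        /* list_del(li) */
                        dfp->work[i - 1] = dfp->work[i];
                dfp->count--;

                error = finish_item(item, &state);
                if (error == -EAGAIN) {
                        /* Put the item back, drop the done item, and log a
                         * new intent; the caller rolls the transaction. */
                        for (i = dfp->count; i > 0; i--)
                                dfp->work[i] = dfp->work[i - 1];
                        dfp->work[0] = item;
                        dfp->count++;
                        dfp->done = 0;
                        dfp->intent++;          /* xfs_defer_create_intent(.., false) */
                }
                if (error)
                        break;
        }
        printf("intent=%d done=%d remaining=%d error=%d\n",
               dfp->intent, dfp->done, dfp->count, error);
        return error;
}

int main(void)
{
        struct pending dfp = { .work = { 1, 2, 3 }, .count = NWORK, .intent = 1 };

        while (dfp.count) {
                int error = defer_finish_one(&dfp);

                if (error && error != -EAGAIN)
                        return 1;       /* the real caller shuts the fs down */
                /* the real caller rolls the transaction here and loops */
        }
        return 0;
}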
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c index 1dbf2f980a26..5b59d3f7746b 100644 --- a/fs/xfs/libxfs/xfs_dir2_block.c +++ b/fs/xfs/libxfs/xfs_dir2_block.c @@ -1104,7 +1104,7 @@ xfs_dir2_sf_to_block( ASSERT(ifp->if_bytes == dp->i_d.di_size); ASSERT(ifp->if_u1.if_data != NULL); ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count)); - ASSERT(dp->i_d.di_nextents == 0); + ASSERT(dp->i_df.if_nextents == 0); /* * Copy the directory into a temporary buffer. diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h index 01ee0b926572..44c6a77cba05 100644 --- a/fs/xfs/libxfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 7b7f6fb2ea3b..2463b5d73447 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -343,7 +343,7 @@ xfs_dir2_block_to_sf( */ ASSERT(dp->i_df.if_bytes == 0); xfs_init_local_fork(dp, XFS_DATA_FORK, sfp, size); - dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + dp->i_df.if_format = XFS_DINODE_FMT_LOCAL; dp->i_d.di_size = size; logflags |= XFS_ILOG_DDATA; @@ -710,11 +710,11 @@ xfs_dir2_sf_verify( struct xfs_inode *ip) { struct xfs_mount *mp = ip->i_mount; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); struct xfs_dir2_sf_hdr *sfp; struct xfs_dir2_sf_entry *sfep; struct xfs_dir2_sf_entry *next_sfep; char *endp; - struct xfs_ifork *ifp; xfs_ino_t ino; int i; int i8count; @@ -723,9 +723,8 @@ xfs_dir2_sf_verify( int error; uint8_t filetype; - ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL); + ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data; size = ifp->if_bytes; @@ -827,9 +826,9 @@ xfs_dir2_sf_create( * If it's currently a zero-length extent file, * convert it to local format. */ - if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) { + if (dp->i_df.if_format == XFS_DINODE_FMT_EXTENTS) { dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */ - dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + dp->i_df.if_format = XFS_DINODE_FMT_LOCAL; xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); dp->i_df.if_flags |= XFS_IFINLINE; } @@ -1027,7 +1026,7 @@ xfs_dir2_sf_replace_needblock( int newsize; struct xfs_dir2_sf_hdr *sfp; - if (dp->i_d.di_format != XFS_DINODE_FMT_LOCAL) + if (dp->i_df.if_format != XFS_DINODE_FMT_LOCAL) return false; sfp = (struct xfs_dir2_sf_hdr *)dp->i_df.if_u1.if_data; diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h index 79e6c4fb1d8a..53b305dea381 100644 --- a/fs/xfs/libxfs/xfs_errortag.h +++ b/fs/xfs/libxfs/xfs_errortag.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * Copyright (C) 2017 Oracle. @@ -55,7 +55,8 @@ #define XFS_ERRTAG_FORCE_SCRUB_REPAIR 32 #define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33 #define XFS_ERRTAG_IUNLINK_FALLBACK 34 -#define XFS_ERRTAG_MAX 35 +#define XFS_ERRTAG_BUF_IOERROR 35 +#define XFS_ERRTAG_MAX 36 /* * Random factors for above tags, 1 means always, 2 means 1/2 time, etc. 
@@ -95,5 +96,6 @@ #define XFS_RANDOM_FORCE_SCRUB_REPAIR 1 #define XFS_RANDOM_FORCE_SUMMARY_RECALC 1 #define XFS_RANDOM_IUNLINK_FALLBACK (XFS_RANDOM_DEFAULT/10) +#define XFS_RANDOM_BUF_IOERROR XFS_RANDOM_DEFAULT #endif /* __XFS_ERRORTAG_H_ */ diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 045556e78ee2..b42a52bfa1e9 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. @@ -964,13 +964,12 @@ enum xfs_dinode_fmt { /* * Inode data & attribute fork sizes, per inode. */ -#define XFS_DFORK_Q(dip) ((dip)->di_forkoff != 0) #define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3)) #define XFS_DFORK_DSIZE(dip,mp) \ - (XFS_DFORK_Q(dip) ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp)) + ((dip)->di_forkoff ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp)) #define XFS_DFORK_ASIZE(dip,mp) \ - (XFS_DFORK_Q(dip) ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0) + ((dip)->di_forkoff ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0) #define XFS_DFORK_SIZE(dip,mp,w) \ ((w) == XFS_DATA_FORK ? \ XFS_DFORK_DSIZE(dip, mp) : \ @@ -1681,7 +1680,7 @@ struct xfs_acl_entry { struct xfs_acl { __be32 acl_cnt; - struct xfs_acl_entry acl_entry[0]; + struct xfs_acl_entry acl_entry[]; }; /* diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h index 245188e4f6d3..84bcffa87753 100644 --- a/fs/xfs/libxfs/xfs_fs.h +++ b/fs/xfs/libxfs/xfs_fs.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: LGPL-2.1 +/* SPDX-License-Identifier: LGPL-2.1 */ /* * Copyright (c) 1995-2005 Silicon Graphics, Inc. * All Rights Reserved. diff --git a/fs/xfs/libxfs/xfs_health.h b/fs/xfs/libxfs/xfs_health.h index 272005ac8c88..99e796256c5d 100644 --- a/fs/xfs/libxfs/xfs_health.h +++ b/fs/xfs/libxfs/xfs_health.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2019 Oracle. All Rights Reserved. * Author: Darrick J. Wong <darrick.wong@oracle.com> diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 39c5a6e24915..6f84ea85fdd8 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -161,8 +161,7 @@ xfs_imap_to_bp( struct xfs_imap *imap, struct xfs_dinode **dipp, struct xfs_buf **bpp, - uint buf_flags, - uint iget_flags) + uint buf_flags) { struct xfs_buf *bp; int error; @@ -172,12 +171,7 @@ xfs_imap_to_bp( (int)imap->im_len, buf_flags, &bp, &xfs_inode_buf_ops); if (error) { - if (error == -EAGAIN) { - ASSERT(buf_flags & XBF_TRYLOCK); - return error; - } - xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.", - __func__, error); + ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK)); return error; } @@ -186,13 +180,36 @@ xfs_imap_to_bp( return 0; } -void +int xfs_inode_from_disk( struct xfs_inode *ip, struct xfs_dinode *from) { struct xfs_icdinode *to = &ip->i_d; struct inode *inode = VFS_I(ip); + int error; + xfs_failaddr_t fa; + + ASSERT(ip->i_cowfp == NULL); + ASSERT(ip->i_afp == NULL); + + fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from); + if (fa) { + xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from, + sizeof(*from), fa); + return -EFSCORRUPTED; + } + + /* + * First get the permanent information that is needed to allocate an + * inode. If the inode is unused, mode is zero and we shouldn't mess + * with the unitialized part of it. 
+ */ + to->di_flushiter = be16_to_cpu(from->di_flushiter); + inode->i_generation = be32_to_cpu(from->di_gen); + inode->i_mode = be16_to_cpu(from->di_mode); + if (!inode->i_mode) + return 0; /* * Convert v1 inodes immediately to v2 inode format as this is the @@ -208,10 +225,8 @@ xfs_inode_from_disk( be16_to_cpu(from->di_projid_lo); } - to->di_format = from->di_format; i_uid_write(inode, be32_to_cpu(from->di_uid)); i_gid_write(inode, be32_to_cpu(from->di_gid)); - to->di_flushiter = be16_to_cpu(from->di_flushiter); /* * Time is signed, so need to convert to signed 32 bit before @@ -225,16 +240,11 @@ xfs_inode_from_disk( inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec); inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec); inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec); - inode->i_generation = be32_to_cpu(from->di_gen); - inode->i_mode = be16_to_cpu(from->di_mode); to->di_size = be64_to_cpu(from->di_size); to->di_nblocks = be64_to_cpu(from->di_nblocks); to->di_extsize = be32_to_cpu(from->di_extsize); - to->di_nextents = be32_to_cpu(from->di_nextents); - to->di_anextents = be16_to_cpu(from->di_anextents); to->di_forkoff = from->di_forkoff; - to->di_aformat = from->di_aformat; to->di_dmevmask = be32_to_cpu(from->di_dmevmask); to->di_dmstate = be16_to_cpu(from->di_dmstate); to->di_flags = be16_to_cpu(from->di_flags); @@ -247,6 +257,22 @@ xfs_inode_from_disk( to->di_flags2 = be64_to_cpu(from->di_flags2); to->di_cowextsize = be32_to_cpu(from->di_cowextsize); } + + error = xfs_iformat_data_fork(ip, from); + if (error) + return error; + if (from->di_forkoff) { + error = xfs_iformat_attr_fork(ip, from); + if (error) + goto out_destroy_data_fork; + } + if (xfs_is_reflink_inode(ip)) + xfs_ifork_init_cow(ip); + return 0; + +out_destroy_data_fork: + xfs_idestroy_fork(&ip->i_df); + return error; } void @@ -261,7 +287,7 @@ xfs_inode_to_disk( to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); to->di_onlink = 0; - to->di_format = from->di_format; + to->di_format = xfs_ifork_format(&ip->i_df); to->di_uid = cpu_to_be32(i_uid_read(inode)); to->di_gid = cpu_to_be32(i_gid_read(inode)); to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff); @@ -281,10 +307,10 @@ xfs_inode_to_disk( to->di_size = cpu_to_be64(from->di_size); to->di_nblocks = cpu_to_be64(from->di_nblocks); to->di_extsize = cpu_to_be32(from->di_extsize); - to->di_nextents = cpu_to_be32(from->di_nextents); - to->di_anextents = cpu_to_be16(from->di_anextents); + to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df)); + to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp)); to->di_forkoff = from->di_forkoff; - to->di_aformat = from->di_aformat; + to->di_aformat = xfs_ifork_format(ip->i_afp); to->di_dmevmask = cpu_to_be32(from->di_dmevmask); to->di_dmstate = cpu_to_be16(from->di_dmstate); to->di_flags = cpu_to_be16(from->di_flags); @@ -405,7 +431,7 @@ xfs_dinode_verify_forkoff( struct xfs_dinode *dip, struct xfs_mount *mp) { - if (!XFS_DFORK_Q(dip)) + if (!dip->di_forkoff) return NULL; switch (dip->di_format) { @@ -508,7 +534,7 @@ xfs_dinode_verify( return __this_address; } - if (XFS_DFORK_Q(dip)) { + if (dip->di_forkoff) { fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK); if (fa) return fa; @@ -585,122 +611,6 @@ xfs_dinode_calc_crc( } /* - * Read the disk inode attributes into the in-core inode structure. 
- * - * For version 5 superblocks, if we are initialising a new inode and we are not - * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new - * inode core with a random generation number. If we are keeping inodes around, - * we need to read the inode cluster to get the existing generation number off - * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode - * format) then log recovery is dependent on the di_flushiter field being - * initialised from the current on-disk value and hence we must also read the - * inode off disk. - */ -int -xfs_iread( - xfs_mount_t *mp, - xfs_trans_t *tp, - xfs_inode_t *ip, - uint iget_flags) -{ - xfs_buf_t *bp; - xfs_dinode_t *dip; - xfs_failaddr_t fa; - int error; - - /* - * Fill in the location information in the in-core inode. - */ - error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); - if (error) - return error; - - /* shortcut IO on inode allocation if possible */ - if ((iget_flags & XFS_IGET_CREATE) && - xfs_sb_version_has_v3inode(&mp->m_sb) && - !(mp->m_flags & XFS_MOUNT_IKEEP)) { - VFS_I(ip)->i_generation = prandom_u32(); - return 0; - } - - /* - * Get pointers to the on-disk inode and the buffer containing it. - */ - error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags); - if (error) - return error; - - /* even unallocated inodes are verified */ - fa = xfs_dinode_verify(mp, ip->i_ino, dip); - if (fa) { - xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip, - sizeof(*dip), fa); - error = -EFSCORRUPTED; - goto out_brelse; - } - - /* - * If the on-disk inode is already linked to a directory - * entry, copy all of the inode into the in-core inode. - * xfs_iformat_fork() handles copying in the inode format - * specific information. - * Otherwise, just get the truly permanent information. - */ - if (dip->di_mode) { - xfs_inode_from_disk(ip, dip); - error = xfs_iformat_fork(ip, dip); - if (error) { -#ifdef DEBUG - xfs_alert(mp, "%s: xfs_iformat() returned error %d", - __func__, error); -#endif /* DEBUG */ - goto out_brelse; - } - } else { - /* - * Partial initialisation of the in-core inode. Just the bits - * that xfs_ialloc won't overwrite or relies on being correct. - */ - VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen); - ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); - - /* - * Make sure to pull in the mode here as well in - * case the inode is released without being used. - * This ensures that xfs_inactive() will see that - * the inode is already free and not try to mess - * with the uninitialized part of it. - */ - VFS_I(ip)->i_mode = 0; - } - - ip->i_delayed_blks = 0; - - /* - * Mark the buffer containing the inode as something to keep - * around for a while. This helps to keep recently accessed - * meta-data in-core longer. - */ - xfs_buf_set_ref(bp, XFS_INO_REF); - - /* - * Use xfs_trans_brelse() to release the buffer containing the on-disk - * inode, because it was acquired with xfs_trans_read_buf() in - * xfs_imap_to_bp() above. If tp is NULL, this is just a normal - * brelse(). If we're within a transaction, then xfs_trans_brelse() - * will only release the buffer if it is not dirty within the - * transaction. It will be OK to release the buffer in this case, - * because inodes on disk are never destroyed and we will be locking the - * new in-core inode before putting it in the cache where other - * processes can find it. Thus we don't have to worry about the inode - * being changed just because we released the buffer. 
- */ - out_brelse: - xfs_trans_brelse(tp, bp); - return error; -} - -/* * Validate di_extsize hint. * * The rules are documented at xfs_ioctl_setattr_check_extsize(). diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h index 9b373dcf9e34..865ac493c72a 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.h +++ b/fs/xfs/libxfs/xfs_inode_buf.h @@ -16,16 +16,12 @@ struct xfs_dinode; * format specific structures at the appropriate time. */ struct xfs_icdinode { - int8_t di_format; /* format of di_c data */ uint16_t di_flushiter; /* incremented on flush */ uint32_t di_projid; /* owner's project id */ xfs_fsize_t di_size; /* number of bytes in file */ xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */ xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ - xfs_extnum_t di_nextents; /* number of extents in data fork */ - xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */ - int8_t di_aformat; /* format of attr fork's data */ uint32_t di_dmevmask; /* DMIG event mask */ uint16_t di_dmstate; /* DMIG state info */ uint16_t di_flags; /* random flags, XFS_DIFLAG_... */ @@ -48,13 +44,11 @@ struct xfs_imap { int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *, struct xfs_imap *, struct xfs_dinode **, - struct xfs_buf **, uint, uint); -int xfs_iread(struct xfs_mount *, struct xfs_trans *, - struct xfs_inode *, uint); + struct xfs_buf **, uint); void xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *); void xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to, xfs_lsn_t lsn); -void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from); +int xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from); void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, struct xfs_dinode *to); diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 518c6f0ec3a6..28b366275ae0 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -26,110 +26,6 @@ kmem_zone_t *xfs_ifork_zone; -STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); -STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); -STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); - -/* - * Copy inode type and data and attr format specific information from the - * on-disk inode to the in-core inode and fork structures. For fifos, devices, - * and sockets this means set i_rdev to the proper value. For files, - * directories, and symlinks this means to bring in the in-line data or extent - * pointers as well as the attribute fork. For a fork in B-tree format, only - * the root is immediately brought in-core. The rest will be read in later when - * first referenced (see xfs_iread_extents()). 
- */ -int -xfs_iformat_fork( - struct xfs_inode *ip, - struct xfs_dinode *dip) -{ - struct inode *inode = VFS_I(ip); - struct xfs_attr_shortform *atp; - int size; - int error = 0; - xfs_fsize_t di_size; - - switch (inode->i_mode & S_IFMT) { - case S_IFIFO: - case S_IFCHR: - case S_IFBLK: - case S_IFSOCK: - ip->i_d.di_size = 0; - inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip)); - break; - - case S_IFREG: - case S_IFLNK: - case S_IFDIR: - switch (dip->di_format) { - case XFS_DINODE_FMT_LOCAL: - di_size = be64_to_cpu(dip->di_size); - size = (int)di_size; - error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); - break; - case XFS_DINODE_FMT_EXTENTS: - error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); - break; - case XFS_DINODE_FMT_BTREE: - error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); - break; - default: - xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, - dip, sizeof(*dip), __this_address); - return -EFSCORRUPTED; - } - break; - - default: - xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, - sizeof(*dip), __this_address); - return -EFSCORRUPTED; - } - if (error) - return error; - - if (xfs_is_reflink_inode(ip)) { - ASSERT(ip->i_cowfp == NULL); - xfs_ifork_init_cow(ip); - } - - if (!XFS_DFORK_Q(dip)) - return 0; - - ASSERT(ip->i_afp == NULL); - ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS); - - switch (dip->di_aformat) { - case XFS_DINODE_FMT_LOCAL: - atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); - size = be16_to_cpu(atp->hdr.totsize); - - error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); - break; - case XFS_DINODE_FMT_EXTENTS: - error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); - break; - case XFS_DINODE_FMT_BTREE: - error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); - break; - default: - xfs_inode_verifier_error(ip, error, __func__, dip, - sizeof(*dip), __this_address); - error = -EFSCORRUPTED; - break; - } - if (error) { - kmem_cache_free(xfs_ifork_zone, ip->i_afp); - ip->i_afp = NULL; - if (ip->i_cowfp) - kmem_cache_free(xfs_ifork_zone, ip->i_cowfp); - ip->i_cowfp = NULL; - xfs_idestroy_fork(ip, XFS_DATA_FORK); - } - return error; -} - void xfs_init_local_fork( struct xfs_inode *ip, @@ -292,12 +188,11 @@ xfs_iformat_btree( * or the number of extents is greater than the number of * blocks. */ - if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= - XFS_IFORK_MAXEXT(ip, whichfork) || + if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) || nrecs == 0 || XFS_BMDR_SPACE_CALC(nrecs) > XFS_DFORK_SIZE(dip, mp, whichfork) || - XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) || + ifp->if_nextents > ip->i_d.di_nblocks) || level == 0 || level > XFS_BTREE_MAXLEVELS) { xfs_warn(mp, "corrupt inode %Lu (btree).", (unsigned long long) ip->i_ino); @@ -325,6 +220,110 @@ xfs_iformat_btree( return 0; } +int +xfs_iformat_data_fork( + struct xfs_inode *ip, + struct xfs_dinode *dip) +{ + struct inode *inode = VFS_I(ip); + int error; + + /* + * Initialize the extent count early, as the per-format routines may + * depend on it. 
+ */ + ip->i_df.if_format = dip->di_format; + ip->i_df.if_nextents = be32_to_cpu(dip->di_nextents); + + switch (inode->i_mode & S_IFMT) { + case S_IFIFO: + case S_IFCHR: + case S_IFBLK: + case S_IFSOCK: + ip->i_d.di_size = 0; + inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip)); + return 0; + case S_IFREG: + case S_IFLNK: + case S_IFDIR: + switch (ip->i_df.if_format) { + case XFS_DINODE_FMT_LOCAL: + error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, + be64_to_cpu(dip->di_size)); + if (!error) + error = xfs_ifork_verify_local_data(ip); + return error; + case XFS_DINODE_FMT_EXTENTS: + return xfs_iformat_extents(ip, dip, XFS_DATA_FORK); + case XFS_DINODE_FMT_BTREE: + return xfs_iformat_btree(ip, dip, XFS_DATA_FORK); + default: + xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, + dip, sizeof(*dip), __this_address); + return -EFSCORRUPTED; + } + break; + default: + xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, + sizeof(*dip), __this_address); + return -EFSCORRUPTED; + } +} + +static uint16_t +xfs_dfork_attr_shortform_size( + struct xfs_dinode *dip) +{ + struct xfs_attr_shortform *atp = + (struct xfs_attr_shortform *)XFS_DFORK_APTR(dip); + + return be16_to_cpu(atp->hdr.totsize); +} + +int +xfs_iformat_attr_fork( + struct xfs_inode *ip, + struct xfs_dinode *dip) +{ + int error = 0; + + /* + * Initialize the extent count early, as the per-format routines may + * depend on it. + */ + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS); + ip->i_afp->if_format = dip->di_aformat; + if (unlikely(ip->i_afp->if_format == 0)) /* pre IRIX 6.2 file system */ + ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS; + ip->i_afp->if_nextents = be16_to_cpu(dip->di_anextents); + + switch (ip->i_afp->if_format) { + case XFS_DINODE_FMT_LOCAL: + error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, + xfs_dfork_attr_shortform_size(dip)); + if (!error) + error = xfs_ifork_verify_local_attr(ip); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); + break; + default: + xfs_inode_verifier_error(ip, error, __func__, dip, + sizeof(*dip), __this_address); + error = -EFSCORRUPTED; + break; + } + + if (error) { + kmem_cache_free(xfs_ifork_zone, ip->i_afp); + ip->i_afp = NULL; + } + return error; +} + /* * Reallocate the space for if_broot based on the number of records * being added or deleted as indicated in rec_diff. Move the records @@ -504,38 +503,24 @@ xfs_idata_realloc( void xfs_idestroy_fork( - xfs_inode_t *ip, - int whichfork) + struct xfs_ifork *ifp) { - struct xfs_ifork *ifp; - - ifp = XFS_IFORK_PTR(ip, whichfork); if (ifp->if_broot != NULL) { kmem_free(ifp->if_broot); ifp->if_broot = NULL; } /* - * If the format is local, then we can't have an extents - * array so just look for an inline data array. If we're - * not local then we may or may not have an extents list, - * so check and free it up if we do. + * If the format is local, then we can't have an extents array so just + * look for an inline data array. If we're not local then we may or may + * not have an extents list, so check and free it up if we do. 
*/ - if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { - if (ifp->if_u1.if_data != NULL) { - kmem_free(ifp->if_u1.if_data); - ifp->if_u1.if_data = NULL; - } - } else if ((ifp->if_flags & XFS_IFEXTENTS) && ifp->if_height) { - xfs_iext_destroy(ifp); - } - - if (whichfork == XFS_ATTR_FORK) { - kmem_cache_free(xfs_ifork_zone, ip->i_afp); - ip->i_afp = NULL; - } else if (whichfork == XFS_COW_FORK) { - kmem_cache_free(xfs_ifork_zone, ip->i_cowfp); - ip->i_cowfp = NULL; + if (ifp->if_format == XFS_DINODE_FMT_LOCAL) { + kmem_free(ifp->if_u1.if_data); + ifp->if_u1.if_data = NULL; + } else if (ifp->if_flags & XFS_IFEXTENTS) { + if (ifp->if_height) + xfs_iext_destroy(ifp); } } @@ -592,7 +577,7 @@ void xfs_iflush_fork( xfs_inode_t *ip, xfs_dinode_t *dip, - xfs_inode_log_item_t *iip, + struct xfs_inode_log_item *iip, int whichfork) { char *cp; @@ -618,7 +603,7 @@ xfs_iflush_fork( } cp = XFS_DFORK_PTR(dip, whichfork); mp = ip->i_mount; - switch (XFS_IFORK_FORMAT(ip, whichfork)) { + switch (ifp->if_format) { case XFS_DINODE_FMT_LOCAL: if ((iip->ili_fields & dataflag[whichfork]) && (ifp->if_bytes > 0)) { @@ -633,7 +618,7 @@ xfs_iflush_fork( !(iip->ili_fields & extflag[whichfork])); if ((iip->ili_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { - ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); + ASSERT(ifp->if_nextents > 0); (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, whichfork); } @@ -691,48 +676,55 @@ xfs_ifork_init_cow( ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS); ip->i_cowfp->if_flags = XFS_IFEXTENTS; - ip->i_cformat = XFS_DINODE_FMT_EXTENTS; - ip->i_cnextents = 0; + ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS; } -/* Default fork content verifiers. */ -struct xfs_ifork_ops xfs_default_ifork_ops = { - .verify_attr = xfs_attr_shortform_verify, - .verify_dir = xfs_dir2_sf_verify, - .verify_symlink = xfs_symlink_shortform_verify, -}; - /* Verify the inline contents of the data fork of an inode. */ -xfs_failaddr_t -xfs_ifork_verify_data( - struct xfs_inode *ip, - struct xfs_ifork_ops *ops) +int +xfs_ifork_verify_local_data( + struct xfs_inode *ip) { - /* Non-local data fork, we're done. */ - if (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) - return NULL; + xfs_failaddr_t fa = NULL; - /* Check the inline data fork if there is one. */ switch (VFS_I(ip)->i_mode & S_IFMT) { case S_IFDIR: - return ops->verify_dir(ip); + fa = xfs_dir2_sf_verify(ip); + break; case S_IFLNK: - return ops->verify_symlink(ip); + fa = xfs_symlink_shortform_verify(ip); + break; default: - return NULL; + break; } + + if (fa) { + xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork", + ip->i_df.if_u1.if_data, ip->i_df.if_bytes, fa); + return -EFSCORRUPTED; + } + + return 0; } /* Verify the inline contents of the attr fork of an inode. */ -xfs_failaddr_t -xfs_ifork_verify_attr( - struct xfs_inode *ip, - struct xfs_ifork_ops *ops) +int +xfs_ifork_verify_local_attr( + struct xfs_inode *ip) { - /* There has to be an attr fork allocated if aformat is local. */ - if (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) - return NULL; - if (!XFS_IFORK_PTR(ip, XFS_ATTR_FORK)) - return __this_address; - return ops->verify_attr(ip); + struct xfs_ifork *ifp = ip->i_afp; + xfs_failaddr_t fa; + + if (!ifp) + fa = __this_address; + else + fa = xfs_attr_shortform_verify(ip); + + if (fa) { + xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork", + ifp ? ifp->if_u1.if_data : NULL, + ifp ? 
ifp->if_bytes : 0, fa); + return -EFSCORRUPTED; + } + + return 0; } diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 668ee942be22..a4953e95c4f3 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h @@ -23,6 +23,8 @@ struct xfs_ifork { } if_u1; short if_broot_bytes; /* bytes allocated for root */ unsigned char if_flags; /* per-fork flags */ + int8_t if_format; /* format of this fork */ + xfs_extnum_t if_nextents; /* # of extents in this fork */ }; /* @@ -55,43 +57,36 @@ struct xfs_ifork { ((w) == XFS_ATTR_FORK ? \ XFS_IFORK_ASIZE(ip) : \ 0)) -#define XFS_IFORK_FORMAT(ip,w) \ - ((w) == XFS_DATA_FORK ? \ - (ip)->i_d.di_format : \ - ((w) == XFS_ATTR_FORK ? \ - (ip)->i_d.di_aformat : \ - (ip)->i_cformat)) -#define XFS_IFORK_FMT_SET(ip,w,n) \ - ((w) == XFS_DATA_FORK ? \ - ((ip)->i_d.di_format = (n)) : \ - ((w) == XFS_ATTR_FORK ? \ - ((ip)->i_d.di_aformat = (n)) : \ - ((ip)->i_cformat = (n)))) -#define XFS_IFORK_NEXTENTS(ip,w) \ - ((w) == XFS_DATA_FORK ? \ - (ip)->i_d.di_nextents : \ - ((w) == XFS_ATTR_FORK ? \ - (ip)->i_d.di_anextents : \ - (ip)->i_cnextents)) -#define XFS_IFORK_NEXT_SET(ip,w,n) \ - ((w) == XFS_DATA_FORK ? \ - ((ip)->i_d.di_nextents = (n)) : \ - ((w) == XFS_ATTR_FORK ? \ - ((ip)->i_d.di_anextents = (n)) : \ - ((ip)->i_cnextents = (n)))) #define XFS_IFORK_MAXEXT(ip, w) \ (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t)) -#define xfs_ifork_has_extents(ip, w) \ - (XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_EXTENTS || \ - XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_BTREE) +static inline bool xfs_ifork_has_extents(struct xfs_ifork *ifp) +{ + return ifp->if_format == XFS_DINODE_FMT_EXTENTS || + ifp->if_format == XFS_DINODE_FMT_BTREE; +} + +static inline xfs_extnum_t xfs_ifork_nextents(struct xfs_ifork *ifp) +{ + if (!ifp) + return 0; + return ifp->if_nextents; +} + +static inline int8_t xfs_ifork_format(struct xfs_ifork *ifp) +{ + if (!ifp) + return XFS_DINODE_FMT_EXTENTS; + return ifp->if_format; +} struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); -int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); +int xfs_iformat_data_fork(struct xfs_inode *, struct xfs_dinode *); +int xfs_iformat_attr_fork(struct xfs_inode *, struct xfs_dinode *); void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, struct xfs_inode_log_item *, int); -void xfs_idestroy_fork(struct xfs_inode *, int); +void xfs_idestroy_fork(struct xfs_ifork *ifp); void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff, int whichfork); void xfs_iroot_realloc(struct xfs_inode *, int, int); @@ -175,18 +170,7 @@ extern struct kmem_zone *xfs_ifork_zone; extern void xfs_ifork_init_cow(struct xfs_inode *ip); -typedef xfs_failaddr_t (*xfs_ifork_verifier_t)(struct xfs_inode *); - -struct xfs_ifork_ops { - xfs_ifork_verifier_t verify_symlink; - xfs_ifork_verifier_t verify_dir; - xfs_ifork_verifier_t verify_attr; -}; -extern struct xfs_ifork_ops xfs_default_ifork_ops; - -xfs_failaddr_t xfs_ifork_verify_data(struct xfs_inode *ip, - struct xfs_ifork_ops *ops); -xfs_failaddr_t xfs_ifork_verify_attr(struct xfs_inode *ip, - struct xfs_ifork_ops *ops); +int xfs_ifork_verify_local_data(struct xfs_inode *ip); +int xfs_ifork_verify_local_attr(struct xfs_inode *ip); #endif /* __XFS_INODE_FORK_H__ */ diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h index 3bf671637a91..641132d0e39d 100644 --- a/fs/xfs/libxfs/xfs_log_recover.h +++ b/fs/xfs/libxfs/xfs_log_recover.h @@ -7,6 +7,73 @@ #define 
__XFS_LOG_RECOVER_H__ /* + * Each log item type (XFS_LI_*) gets its own xlog_recover_item_ops to + * define how recovery should work for that type of log item. + */ +struct xlog_recover_item; + +/* Sorting hat for log items as they're read in. */ +enum xlog_recover_reorder { + XLOG_REORDER_BUFFER_LIST, + XLOG_REORDER_ITEM_LIST, + XLOG_REORDER_INODE_BUFFER_LIST, + XLOG_REORDER_CANCEL_LIST, +}; + +struct xlog_recover_item_ops { + uint16_t item_type; /* XFS_LI_* type code. */ + + /* + * Help sort recovered log items into the order required to replay them + * correctly. Log item types that always use XLOG_REORDER_ITEM_LIST do + * not have to supply a function here. See the comment preceding + * xlog_recover_reorder_trans for more details about what the return + * values mean. + */ + enum xlog_recover_reorder (*reorder)(struct xlog_recover_item *item); + + /* Start readahead for pass2, if provided. */ + void (*ra_pass2)(struct xlog *log, struct xlog_recover_item *item); + + /* Do whatever work we need to do for pass1, if provided. */ + int (*commit_pass1)(struct xlog *log, struct xlog_recover_item *item); + + /* + * This function should do whatever work is needed for pass2 of log + * recovery, if provided. + * + * If the recovered item is an intent item, this function should parse + * the recovered item to construct an in-core log intent item and + * insert it into the AIL. The in-core log intent item should have 1 + * refcount so that the item is freed either (a) when we commit the + * recovered log item for the intent-done item; (b) replay the work and + * log a new intent-done item; or (c) recovery fails and we have to + * abort. + * + * If the recovered item is an intent-done item, this function should + * parse the recovered item to find the id of the corresponding intent + * log item. Next, it should find the in-core log intent item in the + * AIL and release it. + */ + int (*commit_pass2)(struct xlog *log, struct list_head *buffer_list, + struct xlog_recover_item *item, xfs_lsn_t lsn); +}; + +extern const struct xlog_recover_item_ops xlog_icreate_item_ops; +extern const struct xlog_recover_item_ops xlog_buf_item_ops; +extern const struct xlog_recover_item_ops xlog_inode_item_ops; +extern const struct xlog_recover_item_ops xlog_dquot_item_ops; +extern const struct xlog_recover_item_ops xlog_quotaoff_item_ops; +extern const struct xlog_recover_item_ops xlog_bui_item_ops; +extern const struct xlog_recover_item_ops xlog_bud_item_ops; +extern const struct xlog_recover_item_ops xlog_efi_item_ops; +extern const struct xlog_recover_item_ops xlog_efd_item_ops; +extern const struct xlog_recover_item_ops xlog_rui_item_ops; +extern const struct xlog_recover_item_ops xlog_rud_item_ops; +extern const struct xlog_recover_item_ops xlog_cui_item_ops; +extern const struct xlog_recover_item_ops xlog_cud_item_ops; + +/* * Macros, structures, prototypes for internal log manager use. */ @@ -22,13 +89,13 @@ /* * item headers are in ri_buf[0]. Additional buffers follow. 
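Each XFS_LI_* type now exports a const ops table and the generic recovery code dispatches on item_type, calling only the hooks a type actually provides. A rough, self-contained C sketch of that dispatch pattern (the demo_* names are illustrative only, not part of this patch):

#include <stdio.h>

struct demo_item {
	unsigned short	type;
	const char	*payload;
};

struct demo_item_ops {
	unsigned short	item_type;
	/* optional hook: an item type may leave this NULL */
	void		(*ra_pass2)(const struct demo_item *item);
	int		(*commit_pass2)(const struct demo_item *item);
};

static void demo_buf_ra(const struct demo_item *item)
{
	printf("readahead for %s\n", item->payload);
}

static int demo_buf_commit(const struct demo_item *item)
{
	printf("replaying %s\n", item->payload);
	return 0;
}

static const struct demo_item_ops demo_buf_item_ops = {
	.item_type	= 1,
	.ra_pass2	= demo_buf_ra,
	.commit_pass2	= demo_buf_commit,
};

static const struct demo_item_ops *demo_ops_table[] = {
	&demo_buf_item_ops,
};

static const struct demo_item_ops *demo_find_ops(unsigned short type)
{
	for (unsigned int i = 0; i < sizeof(demo_ops_table) / sizeof(demo_ops_table[0]); i++)
		if (demo_ops_table[i]->item_type == type)
			return demo_ops_table[i];
	return NULL;
}

int main(void)
{
	struct demo_item item = { .type = 1, .payload = "buffer log item" };
	const struct demo_item_ops *ops = demo_find_ops(item.type);

	if (!ops)
		return 1;
	if (ops->ra_pass2)		/* only call hooks the type provides */
		ops->ra_pass2(&item);
	return ops->commit_pass2(&item);
}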
*/ -typedef struct xlog_recover_item { +struct xlog_recover_item { struct list_head ri_list; - int ri_type; int ri_cnt; /* count of regions found */ int ri_total; /* total regions */ - xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */ -} xlog_recover_item_t; + struct xfs_log_iovec *ri_buf; /* ptr to regions buffer */ + const struct xlog_recover_item_ops *ri_ops; +}; struct xlog_recover { struct hlist_node r_list; @@ -51,4 +118,12 @@ struct xlog_recover { #define XLOG_RECOVER_PASS1 1 #define XLOG_RECOVER_PASS2 2 +void xlog_buf_readahead(struct xlog *log, xfs_daddr_t blkno, uint len, + const struct xfs_buf_ops *ops); +bool xlog_is_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len); +void xlog_recover_iodone(struct xfs_buf *bp); + +void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type, + uint64_t intent_id); + #endif /* __XFS_LOG_RECOVER_H__ */ diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h index b2113b17e53c..56d9dd787e7b 100644 --- a/fs/xfs/libxfs/xfs_quota_defs.h +++ b/fs/xfs/libxfs/xfs_quota_defs.h @@ -100,7 +100,6 @@ typedef uint16_t xfs_qwarncnt_t; #define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */ #define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */ #define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ -#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */ /* * flags to xfs_trans_mod_dquot to indicate which field needs to be diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index f42c74cb8be5..9498ced947be 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -66,7 +66,7 @@ xfs_rtbuf_get( ip = issum ? mp->m_rsumip : mp->m_rbmip; - error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK); + error = xfs_bmapi_read(ip, block, 1, &map, &nmap, 0); if (error) return error; diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index c526c5e5ab76..4df87546bd40 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -243,7 +243,7 @@ xfs_validate_sb_common( } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD | XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) { xfs_notice(mp, -"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits."); +"Superblock earlier than Version 5 has XFS_{P|G}QUOTA_{ENFD|CHKD} bits."); return -EFSCORRUPTED; } diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c index 3b8260ca7d1b..594bc447a7dd 100644 --- a/fs/xfs/libxfs/xfs_symlink_remote.c +++ b/fs/xfs/libxfs/xfs_symlink_remote.c @@ -204,16 +204,12 @@ xfs_failaddr_t xfs_symlink_shortform_verify( struct xfs_inode *ip) { - char *sfp; - char *endp; - struct xfs_ifork *ifp; - int size; - - ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL); - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); - sfp = (char *)ifp->if_u1.if_data; - size = ifp->if_bytes; - endp = sfp + size; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + char *sfp = (char *)ifp->if_u1.if_data; + int size = ifp->if_bytes; + char *endp = sfp + size; + + ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); /* * Zero length symlinks should never occur in memory as they are diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index 2b8ccb5b975d..b5dfb6654842 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -27,7 +27,7 @@ xfs_trans_ijoin( struct xfs_inode *ip, uint lock_flags) { - xfs_inode_log_item_t *iip; + struct xfs_inode_log_item *iip; 
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (ip->i_itemp == NULL) diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index add8598eacd5..7badd6dfe544 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -566,8 +566,9 @@ xchk_bmap_check_rmaps( struct xfs_scrub *sc, int whichfork) { - loff_t size; + struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork); xfs_agnumber_t agno; + bool zero_size; int error; if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) || @@ -579,6 +580,8 @@ xchk_bmap_check_rmaps( if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK) return 0; + ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL); + /* * Only do this for complex maps that are in btree format, or for * situations where we would seem to have a size but zero extents. @@ -586,19 +589,14 @@ xchk_bmap_check_rmaps( * to flag this bmap as corrupt if there are rmaps that need to be * reattached. */ - switch (whichfork) { - case XFS_DATA_FORK: - size = i_size_read(VFS_I(sc->ip)); - break; - case XFS_ATTR_FORK: - size = XFS_IFORK_Q(sc->ip); - break; - default: - size = 0; - break; - } - if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE && - (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0)) + + if (whichfork == XFS_DATA_FORK) + zero_size = i_size_read(VFS_I(sc->ip)) == 0; + else + zero_size = false; + + if (ifp->if_format != XFS_DINODE_FMT_BTREE && + (zero_size || ifp->if_nextents > 0)) return 0; for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) { @@ -627,12 +625,14 @@ xchk_bmap( struct xchk_bmap_info info = { NULL }; struct xfs_mount *mp = sc->mp; struct xfs_inode *ip = sc->ip; - struct xfs_ifork *ifp; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); xfs_fileoff_t endoff; struct xfs_iext_cursor icur; int error = 0; - ifp = XFS_IFORK_PTR(ip, whichfork); + /* Non-existent forks can be ignored. */ + if (!ifp) + goto out; info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip); info.whichfork = whichfork; @@ -641,9 +641,6 @@ xchk_bmap( switch (whichfork) { case XFS_COW_FORK: - /* Non-existent CoW forks are ignorable. */ - if (!ifp) - goto out; /* No CoW forks on non-reflink inodes/filesystems. */ if (!xfs_is_reflink_inode(ip)) { xchk_ino_set_corrupt(sc, sc->ip->i_ino); @@ -651,8 +648,6 @@ xchk_bmap( } break; case XFS_ATTR_FORK: - if (!ifp) - goto out_check_rmap; if (!xfs_sb_version_hasattr(&mp->m_sb) && !xfs_sb_version_hasattr2(&mp->m_sb)) xchk_ino_set_corrupt(sc, sc->ip->i_ino); @@ -663,7 +658,7 @@ xchk_bmap( } /* Check the fork values */ - switch (XFS_IFORK_FORMAT(ip, whichfork)) { + switch (ifp->if_format) { case XFS_DINODE_FMT_UUID: case XFS_DINODE_FMT_DEV: case XFS_DINODE_FMT_LOCAL: @@ -717,7 +712,6 @@ xchk_bmap( goto out; } -out_check_rmap: error = xchk_bmap_check_rmaps(sc, whichfork); if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error)) goto out; diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c index 9a2e27ac1300..44b15015021f 100644 --- a/fs/xfs/scrub/dabtree.c +++ b/fs/xfs/scrub/dabtree.c @@ -468,7 +468,7 @@ xchk_da_btree( int error; /* Skip short format data structures; no btree to scan. */ - if (!xfs_ifork_has_extents(sc->ip, whichfork)) + if (!xfs_ifork_has_extents(XFS_IFORK_PTR(sc->ip, whichfork))) return 0; /* Set up initial da state. 
*/ diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c index fe2a6e030c8a..7c432997edad 100644 --- a/fs/xfs/scrub/dir.c +++ b/fs/xfs/scrub/dir.c @@ -635,7 +635,7 @@ xchk_directory_blocks( { struct xfs_bmbt_irec got; struct xfs_da_args args; - struct xfs_ifork *ifp; + struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK); struct xfs_mount *mp = sc->mp; xfs_fileoff_t leaf_lblk; xfs_fileoff_t free_lblk; @@ -647,11 +647,10 @@ xchk_directory_blocks( int error; /* Ignore local format directories. */ - if (sc->ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && - sc->ip->i_d.di_format != XFS_DINODE_FMT_BTREE) + if (ifp->if_format != XFS_DINODE_FMT_EXTENTS && + ifp->if_format != XFS_DINODE_FMT_BTREE) return 0; - ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK); lblk = XFS_B_TO_FSB(mp, XFS_DIR2_DATA_OFFSET); leaf_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_LEAF_OFFSET); free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET); diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c index 64c217eb06a7..6517d67e8d51 100644 --- a/fs/xfs/scrub/ialloc.c +++ b/fs/xfs/scrub/ialloc.c @@ -278,8 +278,7 @@ xchk_iallocbt_check_cluster( &XFS_RMAP_OINFO_INODES); /* Grab the inode cluster buffer. */ - error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp, - 0, 0); + error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp, 0); if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error)) return error; diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c index 5705adc43a75..855aa8bcab64 100644 --- a/fs/xfs/scrub/parent.c +++ b/fs/xfs/scrub/parent.c @@ -90,7 +90,7 @@ xchk_parent_count_parent_dentries( * if there is one. */ lock_mode = xfs_ilock_data_map_shared(parent); - if (parent->i_d.di_nextents > 0) + if (parent->i_df.if_nextents > 0) error = xfs_dir3_data_readahead(parent, 0, 0); xfs_iunlock(parent, lock_mode); if (error) diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 9d9cebf18726..b35611882ff9 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -382,7 +382,7 @@ xfs_map_blocks( */ retry: xfs_ilock(ip, XFS_ILOCK_SHARED); - ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || (ip->i_df.if_flags & XFS_IFEXTENTS)); /* @@ -621,14 +621,11 @@ xfs_vm_readpage( return iomap_readpage(page, &xfs_read_iomap_ops); } -STATIC int -xfs_vm_readpages( - struct file *unused, - struct address_space *mapping, - struct list_head *pages, - unsigned nr_pages) +STATIC void +xfs_vm_readahead( + struct readahead_control *rac) { - return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops); + iomap_readahead(rac, &xfs_read_iomap_ops); } static int @@ -644,7 +641,7 @@ xfs_iomap_swapfile_activate( const struct address_space_operations xfs_address_space_operations = { .readpage = xfs_vm_readpage, - .readpages = xfs_vm_readpages, + .readahead = xfs_vm_readahead, .writepage = xfs_vm_writepage, .writepages = xfs_vm_writepages, .set_page_dirty = iomap_set_page_dirty, diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index c42f90e16b4f..bfad669e6b2f 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c @@ -367,7 +367,7 @@ xfs_attr_inactive( * removal below. 
*/ if (xfs_inode_hasattr(dp) && - dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { + dp->i_afp->if_format != XFS_DINODE_FMT_LOCAL) { error = xfs_attr3_root_inactive(&trans, dp); if (error) goto out_cancel; @@ -388,8 +388,11 @@ out_cancel: xfs_trans_cancel(trans); out_destroy_fork: /* kill the in-core attr fork before we drop the inode lock */ - if (dp->i_afp) - xfs_idestroy_fork(dp, XFS_ATTR_FORK); + if (dp->i_afp) { + xfs_idestroy_fork(dp->i_afp); + kmem_cache_free(xfs_ifork_zone, dp->i_afp); + dp->i_afp = NULL; + } if (lock_mode) xfs_iunlock(dp, lock_mode); return error; diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c index 5ff1d929d3b5..e380bd1a9bfc 100644 --- a/fs/xfs/xfs_attr_list.c +++ b/fs/xfs/xfs_attr_list.c @@ -512,9 +512,9 @@ xfs_attr_list_ilocked( */ if (!xfs_inode_hasattr(dp)) return 0; - else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) + if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) return xfs_attr_shortform_list(context); - else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) return xfs_attr_leaf_list(context); return xfs_attr_node_list(context); } diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index ee6f4229cebc..6736c5ab188f 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -22,16 +22,20 @@ #include "xfs_bmap_btree.h" #include "xfs_trans_space.h" #include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" kmem_zone_t *xfs_bui_zone; kmem_zone_t *xfs_bud_zone; +static const struct xfs_item_ops xfs_bui_item_ops; + static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip) { return container_of(lip, struct xfs_bui_log_item, bui_item); } -void +STATIC void xfs_bui_item_free( struct xfs_bui_log_item *buip) { @@ -45,13 +49,13 @@ xfs_bui_item_free( * committed vs unpin operations in bulk insert operations. Hence the reference * count to ensure only the last caller frees the BUI. */ -void +STATIC void xfs_bui_release( struct xfs_bui_log_item *buip) { ASSERT(atomic_read(&buip->bui_refcount) > 0); if (atomic_dec_and_test(&buip->bui_refcount)) { - xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR); xfs_bui_item_free(buip); } } @@ -124,17 +128,10 @@ xfs_bui_item_release( xfs_bui_release(BUI_ITEM(lip)); } -static const struct xfs_item_ops xfs_bui_item_ops = { - .iop_size = xfs_bui_item_size, - .iop_format = xfs_bui_item_format, - .iop_unpin = xfs_bui_item_unpin, - .iop_release = xfs_bui_item_release, -}; - /* * Allocate and initialize an bui item with the given number of extents. */ -struct xfs_bui_log_item * +STATIC struct xfs_bui_log_item * xfs_bui_init( struct xfs_mount *mp) @@ -278,27 +275,6 @@ xfs_bmap_update_diff_items( return ba->bi_owner->i_ino - bb->bi_owner->i_ino; } -/* Get an BUI. */ -STATIC void * -xfs_bmap_update_create_intent( - struct xfs_trans *tp, - unsigned int count) -{ - struct xfs_bui_log_item *buip; - - ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS); - ASSERT(tp != NULL); - - buip = xfs_bui_init(tp->t_mountp); - ASSERT(buip != NULL); - - /* - * Get a log_item_desc to point at the new item. - */ - xfs_trans_add_item(tp, &buip->bui_item); - return buip; -} - /* Set the map extent flags for this mapping. 
*/ static void xfs_trans_set_bmap_flags( @@ -326,16 +302,12 @@ xfs_trans_set_bmap_flags( STATIC void xfs_bmap_update_log_item( struct xfs_trans *tp, - void *intent, - struct list_head *item) + struct xfs_bui_log_item *buip, + struct xfs_bmap_intent *bmap) { - struct xfs_bui_log_item *buip = intent; - struct xfs_bmap_intent *bmap; uint next_extent; struct xfs_map_extent *map; - bmap = container_of(item, struct xfs_bmap_intent, bi_list); - tp->t_flags |= XFS_TRANS_DIRTY; set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags); @@ -355,23 +327,44 @@ xfs_bmap_update_log_item( bmap->bi_bmap.br_state); } +static struct xfs_log_item * +xfs_bmap_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_bui_log_item *buip = xfs_bui_init(mp); + struct xfs_bmap_intent *bmap; + + ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS); + + xfs_trans_add_item(tp, &buip->bui_item); + if (sort) + list_sort(mp, items, xfs_bmap_update_diff_items); + list_for_each_entry(bmap, items, bi_list) + xfs_bmap_update_log_item(tp, buip, bmap); + return &buip->bui_item; +} + /* Get an BUD so we can process all the deferred rmap updates. */ -STATIC void * +static struct xfs_log_item * xfs_bmap_update_create_done( struct xfs_trans *tp, - void *intent, + struct xfs_log_item *intent, unsigned int count) { - return xfs_trans_get_bud(tp, intent); + return &xfs_trans_get_bud(tp, BUI_ITEM(intent))->bud_item; } /* Process a deferred rmap update. */ STATIC int xfs_bmap_update_finish_item( struct xfs_trans *tp, + struct xfs_log_item *done, struct list_head *item, - void *done_item, - void **state) + struct xfs_btree_cur **state) { struct xfs_bmap_intent *bmap; xfs_filblks_t count; @@ -379,7 +372,7 @@ xfs_bmap_update_finish_item( bmap = container_of(item, struct xfs_bmap_intent, bi_list); count = bmap->bi_bmap.br_blockcount; - error = xfs_trans_log_finish_bmap_update(tp, done_item, + error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done), bmap->bi_type, bmap->bi_owner, bmap->bi_whichfork, bmap->bi_bmap.br_startoff, @@ -398,9 +391,9 @@ xfs_bmap_update_finish_item( /* Abort all pending BUIs. */ STATIC void xfs_bmap_update_abort_intent( - void *intent) + struct xfs_log_item *intent) { - xfs_bui_release(intent); + xfs_bui_release(BUI_ITEM(intent)); } /* Cancel a deferred rmap update. */ @@ -416,10 +409,8 @@ xfs_bmap_update_cancel_item( const struct xfs_defer_op_type xfs_bmap_update_defer_type = { .max_items = XFS_BUI_MAX_FAST_EXTENTS, - .diff_items = xfs_bmap_update_diff_items, .create_intent = xfs_bmap_update_create_intent, .abort_intent = xfs_bmap_update_abort_intent, - .log_item = xfs_bmap_update_log_item, .create_done = xfs_bmap_update_create_done, .finish_item = xfs_bmap_update_finish_item, .cancel_item = xfs_bmap_update_cancel_item, @@ -429,32 +420,30 @@ const struct xfs_defer_op_type xfs_bmap_update_defer_type = { * Process a bmap update intent item that was recovered from the log. * We need to update some inode's bmbt. 
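With .diff_items and .log_item folded away, create_intent now receives the whole pending list and, when asked to sort, orders it with list_sort() using the old comparator (by owner inode number for bmap updates, per xfs_bmap_update_diff_items above) before logging each item. A small user-space sketch of that sort-then-log ordering, using qsort() and hypothetical demo_* types:

#include <stdio.h>
#include <stdlib.h>

struct demo_intent {
	unsigned long long	owner_ino;	/* like bmap->bi_owner->i_ino */
	long long		startoff;
};

static int demo_cmp_by_owner(const void *a, const void *b)
{
	const struct demo_intent *ia = a, *ib = b;

	if (ia->owner_ino < ib->owner_ino)
		return -1;
	return ia->owner_ino > ib->owner_ino;
}

int main(void)
{
	struct demo_intent pending[] = {
		{ .owner_ino = 262272, .startoff = 8 },
		{ .owner_ino = 131,    .startoff = 0 },
		{ .owner_ino = 131,    .startoff = 16 },
	};

	/* order the deferred work by owner before logging it */
	qsort(pending, 3, sizeof(pending[0]), demo_cmp_by_owner);
	for (int i = 0; i < 3; i++)
		printf("log intent: ino %llu, startoff %lld\n",
		       pending[i].owner_ino, pending[i].startoff);
	return 0;
}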
*/ -int -xfs_bui_recover( - struct xfs_trans *parent_tp, - struct xfs_bui_log_item *buip) +STATIC int +xfs_bui_item_recover( + struct xfs_log_item *lip, + struct xfs_trans *parent_tp) { - int error = 0; - unsigned int bui_type; + struct xfs_bmbt_irec irec; + struct xfs_bui_log_item *buip = BUI_ITEM(lip); + struct xfs_trans *tp; + struct xfs_inode *ip = NULL; + struct xfs_mount *mp = parent_tp->t_mountp; struct xfs_map_extent *bmap; + struct xfs_bud_log_item *budp; xfs_fsblock_t startblock_fsb; xfs_fsblock_t inode_fsb; xfs_filblks_t count; - bool op_ok; - struct xfs_bud_log_item *budp; + xfs_exntst_t state; enum xfs_bmap_intent_type type; + bool op_ok; + unsigned int bui_type; int whichfork; - xfs_exntst_t state; - struct xfs_trans *tp; - struct xfs_inode *ip = NULL; - struct xfs_bmbt_irec irec; - struct xfs_mount *mp = parent_tp->t_mountp; - - ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags)); + int error = 0; /* Only one mapping operation per BUI... */ if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { - set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); xfs_bui_release(buip); return -EFSCORRUPTED; } @@ -488,7 +477,6 @@ xfs_bui_recover( * This will pull the BUI from the AIL and * free the memory associated with it. */ - set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); xfs_bui_release(buip); return -EFSCORRUPTED; } @@ -546,7 +534,6 @@ xfs_bui_recover( xfs_bmap_unmap_extent(tp, ip, &irec); } - set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); xfs_defer_move(parent_tp, tp); error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); @@ -563,3 +550,121 @@ err_inode: } return error; } + +STATIC bool +xfs_bui_item_match( + struct xfs_log_item *lip, + uint64_t intent_id) +{ + return BUI_ITEM(lip)->bui_format.bui_id == intent_id; +} + +static const struct xfs_item_ops xfs_bui_item_ops = { + .iop_size = xfs_bui_item_size, + .iop_format = xfs_bui_item_format, + .iop_unpin = xfs_bui_item_unpin, + .iop_release = xfs_bui_item_release, + .iop_recover = xfs_bui_item_recover, + .iop_match = xfs_bui_item_match, +}; + +/* + * Copy an BUI format buffer from the given buf, and into the destination + * BUI format structure. The BUI/BUD items were designed not to need any + * special alignment handling. + */ +static int +xfs_bui_copy_format( + struct xfs_log_iovec *buf, + struct xfs_bui_log_format *dst_bui_fmt) +{ + struct xfs_bui_log_format *src_bui_fmt; + uint len; + + src_bui_fmt = buf->i_addr; + len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents); + + if (buf->i_len == len) { + memcpy(dst_bui_fmt, src_bui_fmt, len); + return 0; + } + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); + return -EFSCORRUPTED; +} + +/* + * This routine is called to create an in-core extent bmap update + * item from the bui format structure which was logged on disk. + * It allocates an in-core bui, copies the extents from the format + * structure into it, and adds the bui to the AIL with the given + * LSN. 
+ */ +STATIC int +xlog_recover_bui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + int error; + struct xfs_mount *mp = log->l_mp; + struct xfs_bui_log_item *buip; + struct xfs_bui_log_format *bui_formatp; + + bui_formatp = item->ri_buf[0].i_addr; + + if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); + return -EFSCORRUPTED; + } + buip = xfs_bui_init(mp); + error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format); + if (error) { + xfs_bui_item_free(buip); + return error; + } + atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents); + /* + * Insert the intent into the AIL directly and drop one reference so + * that finishing or canceling the work will drop the other. + */ + xfs_trans_ail_insert(log->l_ailp, &buip->bui_item, lsn); + xfs_bui_release(buip); + return 0; +} + +const struct xlog_recover_item_ops xlog_bui_item_ops = { + .item_type = XFS_LI_BUI, + .commit_pass2 = xlog_recover_bui_commit_pass2, +}; + +/* + * This routine is called when an BUD format structure is found in a committed + * transaction in the log. Its purpose is to cancel the corresponding BUI if it + * was still in the log. To do this it searches the AIL for the BUI with an id + * equal to that in the BUD format structure. If we find it we drop the BUD + * reference, which removes the BUI from the AIL and frees it. + */ +STATIC int +xlog_recover_bud_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_bud_log_format *bud_formatp; + + bud_formatp = item->ri_buf[0].i_addr; + if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); + return -EFSCORRUPTED; + } + + xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id); + return 0; +} + +const struct xlog_recover_item_ops xlog_bud_item_ops = { + .item_type = XFS_LI_BUD, + .commit_pass2 = xlog_recover_bud_commit_pass2, +}; diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h index ad479cc73de8..b9be62f8bd52 100644 --- a/fs/xfs/xfs_bmap_item.h +++ b/fs/xfs/xfs_bmap_item.h @@ -33,11 +33,6 @@ struct kmem_zone; #define XFS_BUI_MAX_FAST_EXTENTS 1 /* - * Define BUI flag bits. Manipulated by set/clear/test_bit operators. - */ -#define XFS_BUI_RECOVERED 1 - -/* * This is the "bmap update intent" log item. It is used to log the fact that * some reverse mappings need to change. It is used in conjunction with the * "bmap update done" log item described below. 
@@ -49,7 +44,6 @@ struct xfs_bui_log_item { struct xfs_log_item bui_item; atomic_t bui_refcount; atomic_t bui_next_extent; - unsigned long bui_flags; /* misc flags */ struct xfs_bui_log_format bui_format; }; @@ -74,9 +68,4 @@ struct xfs_bud_log_item { extern struct kmem_zone *xfs_bui_zone; extern struct kmem_zone *xfs_bud_zone; -struct xfs_bui_log_item *xfs_bui_init(struct xfs_mount *); -void xfs_bui_item_free(struct xfs_bui_log_item *); -void xfs_bui_release(struct xfs_bui_log_item *); -int xfs_bui_recover(struct xfs_trans *parent_tp, struct xfs_bui_log_item *buip); - #endif /* __XFS_BMAP_ITEM_H__ */ diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 4f800f7fe888..f37f5cc4b19f 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -223,7 +223,7 @@ xfs_bmap_count_blocks( if (!ifp) return 0; - switch (XFS_IFORK_FORMAT(ip, whichfork)) { + switch (ifp->if_format) { case XFS_DINODE_FMT_BTREE: if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(tp, ip, whichfork); @@ -449,7 +449,7 @@ xfs_getbmap( break; } - switch (XFS_IFORK_FORMAT(ip, whichfork)) { + switch (ifp->if_format) { case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: break; @@ -1210,17 +1210,26 @@ xfs_swap_extents_check_format( struct xfs_inode *ip, /* target inode */ struct xfs_inode *tip) /* tmp inode */ { + struct xfs_ifork *ifp = &ip->i_df; + struct xfs_ifork *tifp = &tip->i_df; + + /* User/group/project quota ids must match if quotas are enforced. */ + if (XFS_IS_QUOTA_ON(ip->i_mount) && + (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) || + !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) || + ip->i_d.di_projid != tip->i_d.di_projid)) + return -EINVAL; /* Should never get a local format */ - if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || - tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) + if (ifp->if_format == XFS_DINODE_FMT_LOCAL || + tifp->if_format == XFS_DINODE_FMT_LOCAL) return -EINVAL; /* * if the target inode has less extents that then temporary inode then * why did userspace call us? */ - if (ip->i_d.di_nextents < tip->i_d.di_nextents) + if (ifp->if_nextents < tifp->if_nextents) return -EINVAL; /* @@ -1235,20 +1244,18 @@ xfs_swap_extents_check_format( * form then we will end up with the target inode in the wrong format * as we already know there are less extents in the temp inode. */ - if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && - tip->i_d.di_format == XFS_DINODE_FMT_BTREE) + if (ifp->if_format == XFS_DINODE_FMT_EXTENTS && + tifp->if_format == XFS_DINODE_FMT_BTREE) return -EINVAL; /* Check temp in extent form to max in target */ - if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && - XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > - XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) + if (tifp->if_format == XFS_DINODE_FMT_EXTENTS && + tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) return -EINVAL; /* Check target in extent form to max in temp */ - if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && - XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > - XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) + if (ifp->if_format == XFS_DINODE_FMT_EXTENTS && + ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) return -EINVAL; /* @@ -1260,22 +1267,20 @@ xfs_swap_extents_check_format( * (a common defrag case) which will occur when the temp inode is in * extent format... 
*/ - if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) { + if (tifp->if_format == XFS_DINODE_FMT_BTREE) { if (XFS_IFORK_Q(ip) && - XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip)) + XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip)) return -EINVAL; - if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= - XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) + if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) return -EINVAL; } /* Reciprocal target->temp btree format checks */ - if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { + if (ifp->if_format == XFS_DINODE_FMT_BTREE) { if (XFS_IFORK_Q(tip) && XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip)) return -EINVAL; - if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= - XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) + if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) return -EINVAL; } @@ -1427,15 +1432,15 @@ xfs_swap_extent_forks( /* * Count the number of extended attribute blocks */ - if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) && - (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { + if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 && + ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) { error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk, &aforkblks); if (error) return error; } - if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) && - (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { + if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 && + tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) { error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk, &taforkblks); if (error) @@ -1450,9 +1455,9 @@ xfs_swap_extent_forks( * bmbt scan as the last step. */ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { - if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) + if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE) (*target_log_flags) |= XFS_ILOG_DOWNER; - if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) + if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE) (*src_log_flags) |= XFS_ILOG_DOWNER; } @@ -1468,9 +1473,6 @@ xfs_swap_extent_forks( ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks; tip->i_d.di_nblocks = tmp + taforkblks - aforkblks; - swap(ip->i_d.di_nextents, tip->i_d.di_nextents); - swap(ip->i_d.di_format, tip->i_d.di_format); - /* * The extents in the source inode could still contain speculative * preallocation beyond EOF (e.g. the file is open but not modified @@ -1484,7 +1486,7 @@ xfs_swap_extent_forks( tip->i_delayed_blks = ip->i_delayed_blks; ip->i_delayed_blks = 0; - switch (ip->i_d.di_format) { + switch (ip->i_df.if_format) { case XFS_DINODE_FMT_EXTENTS: (*src_log_flags) |= XFS_ILOG_DEXT; break; @@ -1495,7 +1497,7 @@ xfs_swap_extent_forks( break; } - switch (tip->i_d.di_format) { + switch (tip->i_df.if_format) { case XFS_DINODE_FMT_EXTENTS: (*target_log_flags) |= XFS_ILOG_DEXT; break; @@ -1606,7 +1608,7 @@ xfs_swap_extents( if (xfs_inode_has_cow_data(tip)) { error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true); if (error) - return error; + goto out_unlock; } /* @@ -1615,9 +1617,9 @@ xfs_swap_extents( * performed with log redo items! */ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { - int w = XFS_DATA_FORK; - uint32_t ipnext = XFS_IFORK_NEXTENTS(ip, w); - uint32_t tipnext = XFS_IFORK_NEXTENTS(tip, w); + int w = XFS_DATA_FORK; + uint32_t ipnext = ip->i_df.if_nextents; + uint32_t tipnext = tip->i_df.if_nextents; /* * Conceptually this shouldn't affect the shape of either bmbt, @@ -1717,10 +1719,11 @@ xfs_swap_extents( /* Swap the cow forks. 
*/ if (xfs_sb_version_hasreflink(&mp->m_sb)) { - ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS); - ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS); + ASSERT(!ip->i_cowfp || + ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS); + ASSERT(!tip->i_cowfp || + tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS); - swap(ip->i_cnextents, tip->i_cnextents); swap(ip->i_cowfp, tip->i_cowfp); if (ip->i_cowfp && ip->i_cowfp->if_bytes) diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 9ec3eaf1c618..20b748f7e186 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -477,7 +477,7 @@ _xfs_buf_map_pages( nofs_flag = memalloc_nofs_save(); do { bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, - -1, PAGE_KERNEL); + -1); if (bp->b_addr) break; vm_unmap_aliases(); @@ -1197,8 +1197,10 @@ xfs_buf_ioend( bp->b_ops->verify_read(bp); } - if (!bp->b_error) + if (!bp->b_error) { + bp->b_flags &= ~XBF_WRITE_FAIL; bp->b_flags |= XBF_DONE; + } if (bp->b_iodone) (*(bp->b_iodone))(bp); @@ -1242,10 +1244,26 @@ xfs_buf_ioerror_alert( struct xfs_buf *bp, xfs_failaddr_t func) { - xfs_alert_ratelimited(bp->b_mount, -"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d", - func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length, - -bp->b_error); + xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error", + "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d", + func, (uint64_t)XFS_BUF_ADDR(bp), + bp->b_length, -bp->b_error); +} + +/* + * To simulate an I/O failure, the buffer must be locked and held with at least + * three references. The LRU reference is dropped by the stale call. The buf + * item reference is dropped via ioend processing. The third reference is owned + * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC. + */ +void +xfs_buf_ioend_fail( + struct xfs_buf *bp) +{ + bp->b_flags &= ~XBF_DONE; + xfs_buf_stale(bp); + xfs_buf_ioerror(bp, -EIO); + xfs_buf_ioend(bp); } int @@ -1258,7 +1276,7 @@ xfs_bwrite( bp->b_flags |= XBF_WRITE; bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | - XBF_WRITE_FAIL | XBF_DONE); + XBF_DONE); error = xfs_buf_submit(bp); if (error) @@ -1272,6 +1290,11 @@ xfs_buf_bio_end_io( { struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; + if (!bio->bi_status && + (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && + XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) + bio->bi_status = BLK_STS_IOERR; + /* * don't overwrite existing errors - otherwise we can lose errors on * buffers that require multiple bios to complete. 
@@ -1480,10 +1503,7 @@ __xfs_buf_submit( /* on shutdown we stale and complete the buffer immediately */ if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { - xfs_buf_ioerror(bp, -EIO); - bp->b_flags &= ~XBF_DONE; - xfs_buf_stale(bp); - xfs_buf_ioend(bp); + xfs_buf_ioend_fail(bp); return -EIO; } @@ -1642,7 +1662,8 @@ xfs_wait_buftarg( struct xfs_buftarg *btp) { LIST_HEAD(dispose); - int loop = 0; + int loop = 0; + bool write_fail = false; /* * First wait on the buftarg I/O count for all in-flight buffers to be @@ -1670,17 +1691,29 @@ xfs_wait_buftarg( bp = list_first_entry(&dispose, struct xfs_buf, b_lru); list_del_init(&bp->b_lru); if (bp->b_flags & XBF_WRITE_FAIL) { - xfs_alert(btp->bt_mount, + write_fail = true; + xfs_buf_alert_ratelimited(bp, + "XFS: Corruption Alert", "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!", (long long)bp->b_bn); - xfs_alert(btp->bt_mount, -"Please run xfs_repair to determine the extent of the problem."); } xfs_buf_rele(bp); } if (loop++ != 0) delay(100); } + + /* + * If one or more failed buffers were freed, that means dirty metadata + * was thrown away. This should only ever happen after I/O completion + * handling has elevated I/O error(s) to permanent failures and shuts + * down the fs. + */ + if (write_fail) { + ASSERT(XFS_FORCED_SHUTDOWN(btp->bt_mount)); + xfs_alert(btp->bt_mount, + "Please run xfs_repair to determine the extent of the problem."); + } } static enum lru_status @@ -1813,6 +1846,13 @@ xfs_alloc_buftarg( btp->bt_bdev = bdev; btp->bt_daxdev = dax_dev; + /* + * Buffer IO error rate limiting. Limit it to no more than 10 messages + * per 30 seconds so as to not spam logs too much on repeated errors. + */ + ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ, + DEFAULT_RATELIMIT_BURST); + if (xfs_setsize_buftarg_early(btp, bdev)) goto error_free; @@ -1983,7 +2023,7 @@ xfs_buf_delwri_submit_buffers( * synchronously. Otherwise, drop the buffer from the delwri * queue and submit async. */ - bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); + bp->b_flags &= ~_XBF_DELWRI_Q; bp->b_flags |= XBF_WRITE; if (wait_list) { bp->b_flags &= ~XBF_ASYNC; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 9a04c53c2488..050c53b739e2 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -91,6 +91,7 @@ typedef struct xfs_buftarg { struct list_lru bt_lru; struct percpu_counter bt_io_count; + struct ratelimit_state bt_ioerror_rl; } xfs_buftarg_t; struct xfs_buf; @@ -263,6 +264,7 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error, xfs_failaddr_t failaddr); #define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address) extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa); +void xfs_buf_ioend_fail(struct xfs_buf *); extern int __xfs_buf_submit(struct xfs_buf *bp, bool); static inline int xfs_buf_submit(struct xfs_buf *bp) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 1545657c3ca0..9e75e8d6042e 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -410,7 +410,6 @@ xfs_buf_item_unpin( { struct xfs_buf_log_item *bip = BUF_ITEM(lip); xfs_buf_t *bp = bip->bli_buf; - struct xfs_ail *ailp = lip->li_ailp; int stale = bip->bli_flags & XFS_BLI_STALE; int freed; @@ -452,10 +451,10 @@ xfs_buf_item_unpin( } /* - * If we get called here because of an IO error, we may - * or may not have the item on the AIL. xfs_trans_ail_delete() - * will take care of that situation. - * xfs_trans_ail_delete() drops the AIL lock. 
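The per-buftarg bt_ioerror_rl state initialised above throttles buffer I/O error alerts to a small burst per 30-second window, replacing the old global ratelimit that only covered async write failures. A standalone sketch of that fixed-window throttling idea (the demo_* names are illustrative; the kernel uses its ratelimit_state machinery, not this code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct demo_ratelimit {
	time_t	window_start;
	int	interval_sec;	/* length of one window, e.g. 30 */
	int	burst;		/* messages allowed per window */
	int	seen;
};

static bool demo_ratelimit_allow(struct demo_ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->interval_sec) {
		rl->window_start = now;		/* start a new window */
		rl->seen = 0;
	}
	return rl->seen++ < rl->burst;
}

int main(void)
{
	struct demo_ratelimit rl = {
		.window_start = 0, .interval_sec = 30, .burst = 10,
	};

	for (int i = 0; i < 15; i++)
		if (demo_ratelimit_allow(&rl))
			printf("metadata I/O error alert %d\n", i);
	/* only the first 10 alerts in this window make it to the log */
	return 0;
}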
+ * If we get called here because of an IO error, we may or may + * not have the item on the AIL. xfs_trans_ail_delete() will + * take care of that situation. xfs_trans_ail_delete() drops + * the AIL lock. */ if (bip->bli_flags & XFS_BLI_STALE_INODE) { xfs_buf_do_callbacks(bp); @@ -463,47 +462,23 @@ xfs_buf_item_unpin( list_del_init(&bp->b_li_list); bp->b_iodone = NULL; } else { - spin_lock(&ailp->ail_lock); - xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR); xfs_buf_item_relse(bp); ASSERT(bp->b_log_item == NULL); } xfs_buf_relse(bp); } else if (freed && remove) { /* - * There are currently two references to the buffer - the active - * LRU reference and the buf log item. What we are about to do - * here - simulate a failed IO completion - requires 3 - * references. - * - * The LRU reference is removed by the xfs_buf_stale() call. The - * buf item reference is removed by the xfs_buf_iodone() - * callback that is run by xfs_buf_do_callbacks() during ioend - * processing (via the bp->b_iodone callback), and then finally - * the ioend processing will drop the IO reference if the buffer - * is marked XBF_ASYNC. - * - * Hence we need to take an additional reference here so that IO - * completion processing doesn't free the buffer prematurely. + * The buffer must be locked and held by the caller to simulate + * an async I/O failure. */ xfs_buf_lock(bp); xfs_buf_hold(bp); bp->b_flags |= XBF_ASYNC; - xfs_buf_ioerror(bp, -EIO); - bp->b_flags &= ~XBF_DONE; - xfs_buf_stale(bp); - xfs_buf_ioend(bp); + xfs_buf_ioend_fail(bp); } } -/* - * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30 - * seconds so as to not spam logs too much on repeated detection of the same - * buffer being bad.. - */ - -static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10); - STATIC uint xfs_buf_item_push( struct xfs_log_item *lip, @@ -533,11 +508,10 @@ xfs_buf_item_push( trace_xfs_buf_item_push(bip); /* has a previous flush failed due to IO errors? */ - if ((bp->b_flags & XBF_WRITE_FAIL) && - ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) { - xfs_warn(bp->b_mount, -"Failing async write on buffer block 0x%llx. Retrying async write.", - (long long)bp->b_bn); + if (bp->b_flags & XBF_WRITE_FAIL) { + xfs_buf_alert_ratelimited(bp, "XFS: Failing async write", + "Failing async write on buffer block 0x%llx. Retrying async write.", + (long long)bp->b_bn); } if (!xfs_buf_delwri_queue(bp, buffer_list)) @@ -584,7 +558,7 @@ xfs_buf_item_put( * state. */ if (aborted) - xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(lip, 0); xfs_buf_item_relse(bip->bli_buf); return true; } @@ -1229,61 +1203,19 @@ xfs_buf_iodone( struct xfs_buf *bp, struct xfs_log_item *lip) { - struct xfs_ail *ailp = lip->li_ailp; - ASSERT(BUF_ITEM(lip)->bli_buf == bp); xfs_buf_rele(bp); /* - * If we are forcibly shutting down, this may well be - * off the AIL already. That's because we simulate the - * log-committed callbacks to unpin these buffers. Or we may never - * have put this item on AIL because of the transaction was - * aborted forcibly. xfs_trans_ail_delete() takes care of these. + * If we are forcibly shutting down, this may well be off the AIL + * already. That's because we simulate the log-committed callbacks to + * unpin these buffers. Or we may never have put this item on AIL + * because of the transaction was aborted forcibly. + * xfs_trans_ail_delete() takes care of these. 
* * Either way, AIL is useless if we're forcing a shutdown. */ - spin_lock(&ailp->ail_lock); - xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE); + xfs_trans_ail_delete(lip, SHUTDOWN_CORRUPT_INCORE); xfs_buf_item_free(BUF_ITEM(lip)); } - -/* - * Requeue a failed buffer for writeback. - * - * We clear the log item failed state here as well, but we have to be careful - * about reference counts because the only active reference counts on the buffer - * may be the failed log items. Hence if we clear the log item failed state - * before queuing the buffer for IO we can release all active references to - * the buffer and free it, leading to use after free problems in - * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which - * order we process them in - the buffer is locked, and we own the buffer list - * so nothing on them is going to change while we are performing this action. - * - * Hence we can safely queue the buffer for IO before we clear the failed log - * item state, therefore always having an active reference to the buffer and - * avoiding the transient zero-reference state that leads to use-after-free. - * - * Return true if the buffer was added to the buffer list, false if it was - * already on the buffer list. - */ -bool -xfs_buf_resubmit_failed_buffers( - struct xfs_buf *bp, - struct list_head *buffer_list) -{ - struct xfs_log_item *lip; - bool ret; - - ret = xfs_buf_delwri_queue(bp, buffer_list); - - /* - * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this - * function already have it acquired - */ - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) - xfs_clear_li_failed(lip); - - return ret; -} diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 30114b510332..c9c57e2da932 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -59,8 +59,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *, struct xfs_log_item *); void xfs_buf_iodone_callbacks(struct xfs_buf *); void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *); -bool xfs_buf_resubmit_failed_buffers(struct xfs_buf *, - struct list_head *); bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec); extern kmem_zone_t *xfs_buf_item_zone; diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c new file mode 100644 index 000000000000..04faa7310c4f --- /dev/null +++ b/fs/xfs/xfs_buf_item_recover.c @@ -0,0 +1,984 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_bit.h" +#include "xfs_mount.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_trans_priv.h" +#include "xfs_trace.h" +#include "xfs_log.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" +#include "xfs_error.h" +#include "xfs_inode.h" +#include "xfs_dir2.h" +#include "xfs_quota.h" + +/* + * This structure is used during recovery to record the buf log items which + * have been canceled and should not be replayed. 
+ */ +struct xfs_buf_cancel { + xfs_daddr_t bc_blkno; + uint bc_len; + int bc_refcount; + struct list_head bc_list; +}; + +static struct xfs_buf_cancel * +xlog_find_buffer_cancelled( + struct xlog *log, + xfs_daddr_t blkno, + uint len) +{ + struct list_head *bucket; + struct xfs_buf_cancel *bcp; + + if (!log->l_buf_cancel_table) + return NULL; + + bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); + list_for_each_entry(bcp, bucket, bc_list) { + if (bcp->bc_blkno == blkno && bcp->bc_len == len) + return bcp; + } + + return NULL; +} + +static bool +xlog_add_buffer_cancelled( + struct xlog *log, + xfs_daddr_t blkno, + uint len) +{ + struct xfs_buf_cancel *bcp; + + /* + * If we find an existing cancel record, this indicates that the buffer + * was cancelled multiple times. To ensure that during pass 2 we keep + * the record in the table until we reach its last occurrence in the + * log, a reference count is kept to tell how many times we expect to + * see this record during the second pass. + */ + bcp = xlog_find_buffer_cancelled(log, blkno, len); + if (bcp) { + bcp->bc_refcount++; + return false; + } + + bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0); + bcp->bc_blkno = blkno; + bcp->bc_len = len; + bcp->bc_refcount = 1; + list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno)); + return true; +} + +/* + * Check if there is and entry for blkno, len in the buffer cancel record table. + */ +bool +xlog_is_buffer_cancelled( + struct xlog *log, + xfs_daddr_t blkno, + uint len) +{ + return xlog_find_buffer_cancelled(log, blkno, len) != NULL; +} + +/* + * Check if there is and entry for blkno, len in the buffer cancel record table, + * and decremented the reference count on it if there is one. + * + * Remove the cancel record once the refcount hits zero, so that if the same + * buffer is re-used again after its last cancellation we actually replay the + * changes made at that point. + */ +static bool +xlog_put_buffer_cancelled( + struct xlog *log, + xfs_daddr_t blkno, + uint len) +{ + struct xfs_buf_cancel *bcp; + + bcp = xlog_find_buffer_cancelled(log, blkno, len); + if (!bcp) { + ASSERT(0); + return false; + } + + if (--bcp->bc_refcount == 0) { + list_del(&bcp->bc_list); + kmem_free(bcp); + } + return true; +} + +/* log buffer item recovery */ + +/* + * Sort buffer items for log recovery. Most buffer items should end up on the + * buffer list and are recovered first, with the following exceptions: + * + * 1. XFS_BLF_CANCEL buffers must be processed last because some log items + * might depend on the incor ecancellation record, and replaying a cancelled + * buffer item can remove the incore record. + * + * 2. XFS_BLF_INODE_BUF buffers are handled after most regular items so that + * we replay di_next_unlinked only after flushing the inode 'free' state + * to the inode buffer. + * + * See xlog_recover_reorder_trans for more details. 
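The cancel table above is filled during pass 1, with bc_refcount counting how many times a given (blkno, len) buffer was cancelled, and drained during pass 2 so the record disappears exactly at its last occurrence and any later reuse of the block is replayed normally. A toy single-record sketch of that two-pass counting (hypothetical demo_* names, no hashing or lists):

#include <stdbool.h>
#include <stdio.h>

struct demo_cancel {
	long long	blkno;
	unsigned int	len;
	int		refcount;
	bool		live;
};

/* pass 1: record a cancelled buffer, counting repeated cancellations */
static void demo_cancel_add(struct demo_cancel *c, long long blkno, unsigned int len)
{
	if (c->live && c->blkno == blkno && c->len == len) {
		c->refcount++;
		return;
	}
	c->blkno = blkno;
	c->len = len;
	c->refcount = 1;
	c->live = true;
}

/* pass 2: true means "cancelled, skip replay"; record dies at its last hit */
static bool demo_cancel_put(struct demo_cancel *c, long long blkno, unsigned int len)
{
	if (!c->live || c->blkno != blkno || c->len != len)
		return false;
	if (--c->refcount == 0)
		c->live = false;
	return true;
}

int main(void)
{
	struct demo_cancel c = { .live = false };

	demo_cancel_add(&c, 1024, 8);		/* cancelled twice in pass 1 */
	demo_cancel_add(&c, 1024, 8);
	printf("skip 1st replay: %d\n", demo_cancel_put(&c, 1024, 8));	/* 1 */
	printf("skip 2nd replay: %d\n", demo_cancel_put(&c, 1024, 8));	/* 1 */
	printf("skip 3rd replay: %d\n", demo_cancel_put(&c, 1024, 8));	/* 0: reused block, replay */
	return 0;
}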
+ */ +STATIC enum xlog_recover_reorder +xlog_recover_buf_reorder( + struct xlog_recover_item *item) +{ + struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; + + if (buf_f->blf_flags & XFS_BLF_CANCEL) + return XLOG_REORDER_CANCEL_LIST; + if (buf_f->blf_flags & XFS_BLF_INODE_BUF) + return XLOG_REORDER_INODE_BUFFER_LIST; + return XLOG_REORDER_BUFFER_LIST; +} + +STATIC void +xlog_recover_buf_ra_pass2( + struct xlog *log, + struct xlog_recover_item *item) +{ + struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; + + xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL); +} + +/* + * Build up the table of buf cancel records so that we don't replay cancelled + * data in the second pass. + */ +static int +xlog_recover_buf_commit_pass1( + struct xlog *log, + struct xlog_recover_item *item) +{ + struct xfs_buf_log_format *bf = item->ri_buf[0].i_addr; + + if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) { + xfs_err(log->l_mp, "bad buffer log item size (%d)", + item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + if (!(bf->blf_flags & XFS_BLF_CANCEL)) + trace_xfs_log_recover_buf_not_cancel(log, bf); + else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len)) + trace_xfs_log_recover_buf_cancel_add(log, bf); + else + trace_xfs_log_recover_buf_cancel_ref_inc(log, bf); + return 0; +} + +/* + * Validate the recovered buffer is of the correct type and attach the + * appropriate buffer operations to them for writeback. Magic numbers are in a + * few places: + * the first 16 bits of the buffer (inode buffer, dquot buffer), + * the first 32 bits of the buffer (most blocks), + * inside a struct xfs_da_blkinfo at the start of the buffer. + */ +static void +xlog_recover_validate_buf_type( + struct xfs_mount *mp, + struct xfs_buf *bp, + struct xfs_buf_log_format *buf_f, + xfs_lsn_t current_lsn) +{ + struct xfs_da_blkinfo *info = bp->b_addr; + uint32_t magic32; + uint16_t magic16; + uint16_t magicda; + char *warnmsg = NULL; + + /* + * We can only do post recovery validation on items on CRC enabled + * fielsystems as we need to know when the buffer was written to be able + * to determine if we should have replayed the item. If we replay old + * metadata over a newer buffer, then it will enter a temporarily + * inconsistent state resulting in verification failures. 
Hence for now + * just avoid the verification stage for non-crc filesystems + */ + if (!xfs_sb_version_hascrc(&mp->m_sb)) + return; + + magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); + magic16 = be16_to_cpu(*(__be16*)bp->b_addr); + magicda = be16_to_cpu(info->magic); + switch (xfs_blft_from_flags(buf_f)) { + case XFS_BLFT_BTREE_BUF: + switch (magic32) { + case XFS_ABTB_CRC_MAGIC: + case XFS_ABTB_MAGIC: + bp->b_ops = &xfs_bnobt_buf_ops; + break; + case XFS_ABTC_CRC_MAGIC: + case XFS_ABTC_MAGIC: + bp->b_ops = &xfs_cntbt_buf_ops; + break; + case XFS_IBT_CRC_MAGIC: + case XFS_IBT_MAGIC: + bp->b_ops = &xfs_inobt_buf_ops; + break; + case XFS_FIBT_CRC_MAGIC: + case XFS_FIBT_MAGIC: + bp->b_ops = &xfs_finobt_buf_ops; + break; + case XFS_BMAP_CRC_MAGIC: + case XFS_BMAP_MAGIC: + bp->b_ops = &xfs_bmbt_buf_ops; + break; + case XFS_RMAP_CRC_MAGIC: + bp->b_ops = &xfs_rmapbt_buf_ops; + break; + case XFS_REFC_CRC_MAGIC: + bp->b_ops = &xfs_refcountbt_buf_ops; + break; + default: + warnmsg = "Bad btree block magic!"; + break; + } + break; + case XFS_BLFT_AGF_BUF: + if (magic32 != XFS_AGF_MAGIC) { + warnmsg = "Bad AGF block magic!"; + break; + } + bp->b_ops = &xfs_agf_buf_ops; + break; + case XFS_BLFT_AGFL_BUF: + if (magic32 != XFS_AGFL_MAGIC) { + warnmsg = "Bad AGFL block magic!"; + break; + } + bp->b_ops = &xfs_agfl_buf_ops; + break; + case XFS_BLFT_AGI_BUF: + if (magic32 != XFS_AGI_MAGIC) { + warnmsg = "Bad AGI block magic!"; + break; + } + bp->b_ops = &xfs_agi_buf_ops; + break; + case XFS_BLFT_UDQUOT_BUF: + case XFS_BLFT_PDQUOT_BUF: + case XFS_BLFT_GDQUOT_BUF: +#ifdef CONFIG_XFS_QUOTA + if (magic16 != XFS_DQUOT_MAGIC) { + warnmsg = "Bad DQUOT block magic!"; + break; + } + bp->b_ops = &xfs_dquot_buf_ops; +#else + xfs_alert(mp, + "Trying to recover dquots without QUOTA support built in!"); + ASSERT(0); +#endif + break; + case XFS_BLFT_DINO_BUF: + if (magic16 != XFS_DINODE_MAGIC) { + warnmsg = "Bad INODE block magic!"; + break; + } + bp->b_ops = &xfs_inode_buf_ops; + break; + case XFS_BLFT_SYMLINK_BUF: + if (magic32 != XFS_SYMLINK_MAGIC) { + warnmsg = "Bad symlink block magic!"; + break; + } + bp->b_ops = &xfs_symlink_buf_ops; + break; + case XFS_BLFT_DIR_BLOCK_BUF: + if (magic32 != XFS_DIR2_BLOCK_MAGIC && + magic32 != XFS_DIR3_BLOCK_MAGIC) { + warnmsg = "Bad dir block magic!"; + break; + } + bp->b_ops = &xfs_dir3_block_buf_ops; + break; + case XFS_BLFT_DIR_DATA_BUF: + if (magic32 != XFS_DIR2_DATA_MAGIC && + magic32 != XFS_DIR3_DATA_MAGIC) { + warnmsg = "Bad dir data magic!"; + break; + } + bp->b_ops = &xfs_dir3_data_buf_ops; + break; + case XFS_BLFT_DIR_FREE_BUF: + if (magic32 != XFS_DIR2_FREE_MAGIC && + magic32 != XFS_DIR3_FREE_MAGIC) { + warnmsg = "Bad dir3 free magic!"; + break; + } + bp->b_ops = &xfs_dir3_free_buf_ops; + break; + case XFS_BLFT_DIR_LEAF1_BUF: + if (magicda != XFS_DIR2_LEAF1_MAGIC && + magicda != XFS_DIR3_LEAF1_MAGIC) { + warnmsg = "Bad dir leaf1 magic!"; + break; + } + bp->b_ops = &xfs_dir3_leaf1_buf_ops; + break; + case XFS_BLFT_DIR_LEAFN_BUF: + if (magicda != XFS_DIR2_LEAFN_MAGIC && + magicda != XFS_DIR3_LEAFN_MAGIC) { + warnmsg = "Bad dir leafn magic!"; + break; + } + bp->b_ops = &xfs_dir3_leafn_buf_ops; + break; + case XFS_BLFT_DA_NODE_BUF: + if (magicda != XFS_DA_NODE_MAGIC && + magicda != XFS_DA3_NODE_MAGIC) { + warnmsg = "Bad da node magic!"; + break; + } + bp->b_ops = &xfs_da3_node_buf_ops; + break; + case XFS_BLFT_ATTR_LEAF_BUF: + if (magicda != XFS_ATTR_LEAF_MAGIC && + magicda != XFS_ATTR3_LEAF_MAGIC) { + warnmsg = "Bad attr leaf magic!"; + break; + } + bp->b_ops = 
&xfs_attr3_leaf_buf_ops; + break; + case XFS_BLFT_ATTR_RMT_BUF: + if (magic32 != XFS_ATTR3_RMT_MAGIC) { + warnmsg = "Bad attr remote magic!"; + break; + } + bp->b_ops = &xfs_attr3_rmt_buf_ops; + break; + case XFS_BLFT_SB_BUF: + if (magic32 != XFS_SB_MAGIC) { + warnmsg = "Bad SB block magic!"; + break; + } + bp->b_ops = &xfs_sb_buf_ops; + break; +#ifdef CONFIG_XFS_RT + case XFS_BLFT_RTBITMAP_BUF: + case XFS_BLFT_RTSUMMARY_BUF: + /* no magic numbers for verification of RT buffers */ + bp->b_ops = &xfs_rtbuf_ops; + break; +#endif /* CONFIG_XFS_RT */ + default: + xfs_warn(mp, "Unknown buffer type %d!", + xfs_blft_from_flags(buf_f)); + break; + } + + /* + * Nothing else to do in the case of a NULL current LSN as this means + * the buffer is more recent than the change in the log and will be + * skipped. + */ + if (current_lsn == NULLCOMMITLSN) + return; + + if (warnmsg) { + xfs_warn(mp, warnmsg); + ASSERT(0); + } + + /* + * We must update the metadata LSN of the buffer as it is written out to + * ensure that older transactions never replay over this one and corrupt + * the buffer. This can occur if log recovery is interrupted at some + * point after the current transaction completes, at which point a + * subsequent mount starts recovery from the beginning. + * + * Write verifiers update the metadata LSN from log items attached to + * the buffer. Therefore, initialize a bli purely to carry the LSN to + * the verifier. We'll clean it up in our ->iodone() callback. + */ + if (bp->b_ops) { + struct xfs_buf_log_item *bip; + + ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; + xfs_buf_item_init(bp, mp); + bip = bp->b_log_item; + bip->bli_item.li_lsn = current_lsn; + } +} + +/* + * Perform a 'normal' buffer recovery. Each logged region of the + * buffer should be copied over the corresponding region in the + * given buffer. The bitmap in the buf log format structure indicates + * where to place the logged data. + */ +STATIC void +xlog_recover_do_reg_buffer( + struct xfs_mount *mp, + struct xlog_recover_item *item, + struct xfs_buf *bp, + struct xfs_buf_log_format *buf_f, + xfs_lsn_t current_lsn) +{ + int i; + int bit; + int nbits; + xfs_failaddr_t fa; + const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot); + + trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); + + bit = 0; + i = 1; /* 0 is the buf format structure */ + while (1) { + bit = xfs_next_bit(buf_f->blf_data_map, + buf_f->blf_map_size, bit); + if (bit == -1) + break; + nbits = xfs_contig_bits(buf_f->blf_data_map, + buf_f->blf_map_size, bit); + ASSERT(nbits > 0); + ASSERT(item->ri_buf[i].i_addr != NULL); + ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); + ASSERT(BBTOB(bp->b_length) >= + ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); + + /* + * The dirty regions logged in the buffer, even though + * contiguous, may span multiple chunks. This is because the + * dirty region may span a physical page boundary in a buffer + * and hence be split into two separate vectors for writing into + * the log. Hence we need to trim nbits back to the length of + * the current region being copied out of the log. + */ + if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) + nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; + + /* + * Do a sanity check if this is a dquot buffer. Just checking + * the first dquot in the buffer should do. XXXThis is + * probably a good thing to do for other buf types also. 
+ */ + fa = NULL; + if (buf_f->blf_flags & + (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { + if (item->ri_buf[i].i_addr == NULL) { + xfs_alert(mp, + "XFS: NULL dquot in %s.", __func__); + goto next; + } + if (item->ri_buf[i].i_len < size_disk_dquot) { + xfs_alert(mp, + "XFS: dquot too small (%d) in %s.", + item->ri_buf[i].i_len, __func__); + goto next; + } + fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, + -1, 0); + if (fa) { + xfs_alert(mp, + "dquot corrupt at %pS trying to replay into block 0x%llx", + fa, bp->b_bn); + goto next; + } + } + + memcpy(xfs_buf_offset(bp, + (uint)bit << XFS_BLF_SHIFT), /* dest */ + item->ri_buf[i].i_addr, /* source */ + nbits<<XFS_BLF_SHIFT); /* length */ + next: + i++; + bit += nbits; + } + + /* Shouldn't be any more regions */ + ASSERT(i == item->ri_total); + + xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn); +} + +/* + * Perform a dquot buffer recovery. + * Simple algorithm: if we have found a QUOTAOFF log item of the same type + * (ie. USR or GRP), then just toss this buffer away; don't recover it. + * Else, treat it as a regular buffer and do recovery. + * + * Return false if the buffer was tossed and true if we recovered the buffer to + * indicate to the caller if the buffer needs writing. + */ +STATIC bool +xlog_recover_do_dquot_buffer( + struct xfs_mount *mp, + struct xlog *log, + struct xlog_recover_item *item, + struct xfs_buf *bp, + struct xfs_buf_log_format *buf_f) +{ + uint type; + + trace_xfs_log_recover_buf_dquot_buf(log, buf_f); + + /* + * Filesystems are required to send in quota flags at mount time. + */ + if (!mp->m_qflags) + return false; + + type = 0; + if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) + type |= XFS_DQ_USER; + if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) + type |= XFS_DQ_PROJ; + if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) + type |= XFS_DQ_GROUP; + /* + * This type of quotas was turned off, so ignore this buffer + */ + if (log->l_quotaoffs_flag & type) + return false; + + xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN); + return true; +} + +/* + * Perform recovery for a buffer full of inodes. In these buffers, the only + * data which should be recovered is that which corresponds to the + * di_next_unlinked pointers in the on disk inode structures. The rest of the + * data for the inodes is always logged through the inodes themselves rather + * than the inode buffer and is recovered in xlog_recover_inode_pass2(). + * + * The only time when buffers full of inodes are fully recovered is when the + * buffer is full of newly allocated inodes. In this case the buffer will + * not be marked as an inode buffer and so will be sent to + * xlog_recover_do_reg_buffer() below during recovery. + */ +STATIC int +xlog_recover_do_inode_buffer( + struct xfs_mount *mp, + struct xlog_recover_item *item, + struct xfs_buf *bp, + struct xfs_buf_log_format *buf_f) +{ + int i; + int item_index = 0; + int bit = 0; + int nbits = 0; + int reg_buf_offset = 0; + int reg_buf_bytes = 0; + int next_unlinked_offset; + int inodes_per_buf; + xfs_agino_t *logged_nextp; + xfs_agino_t *buffer_nextp; + + trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); + + /* + * Post recovery validation only works properly on CRC enabled + * filesystems. 
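xlog_recover_do_reg_buffer() above replays only the dirty regions: the buf log format carries a bitmap of fixed-size chunks, and the loop walks each contiguous run of set bits and memcpy()s that run from the logged copy into the buffer. A simplified, byte-granularity sketch of that bitmap walk (the demo_* helpers are illustrative stand-ins for xfs_next_bit()/xfs_contig_bits(), not the real implementations):

#include <stdio.h>
#include <string.h>

#define DEMO_CHUNK 4	/* stand-in for the real chunk size */

/* find the next set bit at or after 'start', or -1 if none */
static int demo_next_bit(const unsigned char *map, int nbits, int start)
{
	for (int i = start; i < nbits; i++)
		if (map[i / 8] & (1u << (i % 8)))
			return i;
	return -1;
}

/* count contiguous set bits from 'start' */
static int demo_contig_bits(const unsigned char *map, int nbits, int start)
{
	int n = 0;

	while (start + n < nbits && (map[(start + n) / 8] & (1u << ((start + n) % 8))))
		n++;
	return n;
}

int main(void)
{
	unsigned char map[1] = { 0x06 };	/* chunks 1 and 2 are dirty */
	char logged[] = "....ABCDEFGH....";	/* region data recorded in the log */
	char buffer[] = "................";	/* buffer being replayed into */
	int bit = 0;

	while ((bit = demo_next_bit(map, 8, bit)) != -1) {
		int nbits = demo_contig_bits(map, 8, bit);

		/* copy one contiguous dirty run into the buffer */
		memcpy(buffer + bit * DEMO_CHUNK, logged + bit * DEMO_CHUNK,
		       nbits * DEMO_CHUNK);
		bit += nbits;
	}
	printf("%s\n", buffer);			/* ....ABCDEFGH.... */
	return 0;
}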
+ */ + if (xfs_sb_version_hascrc(&mp->m_sb)) + bp->b_ops = &xfs_inode_buf_ops; + + inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog; + for (i = 0; i < inodes_per_buf; i++) { + next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + + offsetof(xfs_dinode_t, di_next_unlinked); + + while (next_unlinked_offset >= + (reg_buf_offset + reg_buf_bytes)) { + /* + * The next di_next_unlinked field is beyond + * the current logged region. Find the next + * logged region that contains or is beyond + * the current di_next_unlinked field. + */ + bit += nbits; + bit = xfs_next_bit(buf_f->blf_data_map, + buf_f->blf_map_size, bit); + + /* + * If there are no more logged regions in the + * buffer, then we're done. + */ + if (bit == -1) + return 0; + + nbits = xfs_contig_bits(buf_f->blf_data_map, + buf_f->blf_map_size, bit); + ASSERT(nbits > 0); + reg_buf_offset = bit << XFS_BLF_SHIFT; + reg_buf_bytes = nbits << XFS_BLF_SHIFT; + item_index++; + } + + /* + * If the current logged region starts after the current + * di_next_unlinked field, then move on to the next + * di_next_unlinked field. + */ + if (next_unlinked_offset < reg_buf_offset) + continue; + + ASSERT(item->ri_buf[item_index].i_addr != NULL); + ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); + ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length)); + + /* + * The current logged region contains a copy of the + * current di_next_unlinked field. Extract its value + * and copy it to the buffer copy. + */ + logged_nextp = item->ri_buf[item_index].i_addr + + next_unlinked_offset - reg_buf_offset; + if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) { + xfs_alert(mp, + "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). " + "Trying to replay bad (0) inode di_next_unlinked field.", + item, bp); + return -EFSCORRUPTED; + } + + buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset); + *buffer_nextp = *logged_nextp; + + /* + * If necessary, recalculate the CRC in the on-disk inode. We + * have to leave the inode in a consistent state for whoever + * reads it next.... + */ + xfs_dinode_calc_crc(mp, + xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); + + } + + return 0; +} + +/* + * V5 filesystems know the age of the buffer on disk being recovered. We can + * have newer objects on disk than we are replaying, and so for these cases we + * don't want to replay the current change as that will make the buffer contents + * temporarily invalid on disk. + * + * The magic number might not match the buffer type we are going to recover + * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence + * extract the LSN of the existing object in the buffer based on it's current + * magic number. If we don't recognise the magic number in the buffer, then + * return a LSN of -1 so that the caller knows it was an unrecognised block and + * so can recover the buffer. + * + * Note: we cannot rely solely on magic number matches to determine that the + * buffer has a valid LSN - we also need to verify that it belongs to this + * filesystem, so we need to extract the object's LSN and compare it to that + * which we read from the superblock. If the UUIDs don't match, then we've got a + * stale metadata block from an old filesystem instance that we need to recover + * over the top of. 
+ */ +static xfs_lsn_t +xlog_recover_get_buf_lsn( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + uint32_t magic32; + uint16_t magic16; + uint16_t magicda; + void *blk = bp->b_addr; + uuid_t *uuid; + xfs_lsn_t lsn = -1; + + /* v4 filesystems always recover immediately */ + if (!xfs_sb_version_hascrc(&mp->m_sb)) + goto recover_immediately; + + magic32 = be32_to_cpu(*(__be32 *)blk); + switch (magic32) { + case XFS_ABTB_CRC_MAGIC: + case XFS_ABTC_CRC_MAGIC: + case XFS_ABTB_MAGIC: + case XFS_ABTC_MAGIC: + case XFS_RMAP_CRC_MAGIC: + case XFS_REFC_CRC_MAGIC: + case XFS_IBT_CRC_MAGIC: + case XFS_IBT_MAGIC: { + struct xfs_btree_block *btb = blk; + + lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); + uuid = &btb->bb_u.s.bb_uuid; + break; + } + case XFS_BMAP_CRC_MAGIC: + case XFS_BMAP_MAGIC: { + struct xfs_btree_block *btb = blk; + + lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); + uuid = &btb->bb_u.l.bb_uuid; + break; + } + case XFS_AGF_MAGIC: + lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); + uuid = &((struct xfs_agf *)blk)->agf_uuid; + break; + case XFS_AGFL_MAGIC: + lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); + uuid = &((struct xfs_agfl *)blk)->agfl_uuid; + break; + case XFS_AGI_MAGIC: + lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); + uuid = &((struct xfs_agi *)blk)->agi_uuid; + break; + case XFS_SYMLINK_MAGIC: + lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); + uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid; + break; + case XFS_DIR3_BLOCK_MAGIC: + case XFS_DIR3_DATA_MAGIC: + case XFS_DIR3_FREE_MAGIC: + lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); + uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; + break; + case XFS_ATTR3_RMT_MAGIC: + /* + * Remote attr blocks are written synchronously, rather than + * being logged. That means they do not contain a valid LSN + * (i.e. transactionally ordered) in them, and hence any time we + * see a buffer to replay over the top of a remote attribute + * block we should simply do so. + */ + goto recover_immediately; + case XFS_SB_MAGIC: + /* + * superblock uuids are magic. We may or may not have a + * sb_meta_uuid on disk, but it will be set in the in-core + * superblock. We set the uuid pointer for verification + * according to the superblock feature mask to ensure we check + * the relevant UUID in the superblock. + */ + lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); + if (xfs_sb_version_hasmetauuid(&mp->m_sb)) + uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid; + else + uuid = &((struct xfs_dsb *)blk)->sb_uuid; + break; + default: + break; + } + + if (lsn != (xfs_lsn_t)-1) { + if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid)) + goto recover_immediately; + return lsn; + } + + magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); + switch (magicda) { + case XFS_DIR3_LEAF1_MAGIC: + case XFS_DIR3_LEAFN_MAGIC: + case XFS_DA3_NODE_MAGIC: + lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); + uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; + break; + default: + break; + } + + if (lsn != (xfs_lsn_t)-1) { + if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) + goto recover_immediately; + return lsn; + } + + /* + * We do individual object checks on dquot and inode buffers as they + * have their own individual LSN records. Also, we could have a stale + * buffer here, so we have to at least recognise these buffer types. + * + * A notd complexity here is inode unlinked list processing - it logs + * the inode directly in the buffer, but we don't know which inodes have + * been modified, and there is no global buffer LSN. 
Hence we need to + recover all inode buffer types immediately. This problem will be + fixed by logical logging of the unlinked list modifications. + */ + magic16 = be16_to_cpu(*(__be16 *)blk); + switch (magic16) { + case XFS_DQUOT_MAGIC: + case XFS_DINODE_MAGIC: + goto recover_immediately; + default: + break; + } + + /* unknown buffer contents, recover immediately */ + +recover_immediately: + return (xfs_lsn_t)-1; + +} + +/* + * This routine replays a modification made to a buffer at runtime. + * There are actually two types of buffer, regular and inode, which + * are handled differently. Inode buffers are handled differently + * in that we only recover a specific set of data from them, namely + * the inode di_next_unlinked fields. This is because all other inode + * data is actually logged via inode records and any data we replay + * here which overlaps that may be stale. + * + * When meta-data buffers are freed at run time we log a buffer item + * with the XFS_BLF_CANCEL bit set to indicate that previous copies + * of the buffer in the log should not be replayed at recovery time. + * This is so that if the blocks covered by the buffer are reused for + * file data before we crash we don't end up replaying old, freed + * meta-data into a user's file. + * + * To handle the cancellation of buffer log items, we make two passes + * over the log during recovery. During the first we build a table of + * those buffers which have been cancelled, and during the second we + * only replay those buffers which do not have corresponding cancel + * records in the table. See xlog_recover_buf_pass[1,2] above + * for more details on the implementation of the table of cancel records. + */ +STATIC int +xlog_recover_buf_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t current_lsn) +{ + struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; + struct xfs_mount *mp = log->l_mp; + struct xfs_buf *bp; + int error; + uint buf_flags; + xfs_lsn_t lsn; + + /* + * In this pass we only want to recover all the buffers which have + * not been cancelled and are not cancellation buffers themselves. + */ + if (buf_f->blf_flags & XFS_BLF_CANCEL) { + if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno, + buf_f->blf_len)) + goto cancelled; + } else { + + if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno, + buf_f->blf_len)) + goto cancelled; + } + + trace_xfs_log_recover_buf_recover(log, buf_f); + + buf_flags = 0; + if (buf_f->blf_flags & XFS_BLF_INODE_BUF) + buf_flags |= XBF_UNMAPPED; + + error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, + buf_flags, &bp, NULL); + if (error) + return error; + + /* + * Recover the buffer only if we get an LSN from it and it's less than + * the lsn of the transaction we are replaying. + * + * Note that we have to be extremely careful of readahead here. + * Readahead does not attach verifiers to the buffers, so if we don't + * actually do any replay after readahead because the LSN we found + * in the buffer is more recent than the current transaction, then we + * need to attach the verifier directly. Failure to do so can lead to + * future recovery actions (e.g. EFI and unlinked list recovery) + * operating on the buffers without the verifier attached. This + * can lead to blocks on disk having the correct content but a stale + * CRC. + * + * It is safe to assume these clean buffers are currently up to date.
+ * If the buffer is dirtied by a later transaction being replayed, then + * the verifier will be reset to match whatever recover turns that + * buffer into. + */ + lsn = xlog_recover_get_buf_lsn(mp, bp); + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { + trace_xfs_log_recover_buf_skip(log, buf_f); + xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN); + goto out_release; + } + + if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { + error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); + if (error) + goto out_release; + } else if (buf_f->blf_flags & + (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { + bool dirty; + + dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); + if (!dirty) + goto out_release; + } else { + xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn); + } + + /* + * Perform delayed write on the buffer. Asynchronous writes will be + * slower when taking into account all the buffers to be flushed. + * + * Also make sure that only inode buffers with good sizes stay in + * the buffer cache. The kernel moves inodes in buffers of 1 block + * or inode_cluster_size bytes, whichever is bigger. The inode + * buffers in the log can be a different size if the log was generated + * by an older kernel using unclustered inode buffers or a newer kernel + * running with a different inode cluster size. Regardless, if the + * the inode buffer size isn't max(blocksize, inode_cluster_size) + * for *our* value of inode_cluster_size, then we need to keep + * the buffer out of the buffer cache so that the buffer won't + * overlap with future reads of those inodes. + */ + if (XFS_DINODE_MAGIC == + be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && + (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) { + xfs_buf_stale(bp); + error = xfs_bwrite(bp); + } else { + ASSERT(bp->b_mount == mp); + bp->b_iodone = xlog_recover_iodone; + xfs_buf_delwri_queue(bp, buffer_list); + } + +out_release: + xfs_buf_relse(bp); + return error; +cancelled: + trace_xfs_log_recover_buf_cancel(log, buf_f); + return 0; +} + +const struct xlog_recover_item_ops xlog_buf_item_ops = { + .item_type = XFS_LI_BUF, + .reorder = xlog_recover_buf_reorder, + .ra_pass2 = xlog_recover_buf_ra_pass2, + .commit_pass1 = xlog_recover_buf_commit_pass1, + .commit_pass2 = xlog_recover_buf_commit_pass2, +}; diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c index 871ec22c9aee..66deddd5e296 100644 --- a/fs/xfs/xfs_dir2_readdir.c +++ b/fs/xfs/xfs_dir2_readdir.c @@ -524,7 +524,7 @@ xfs_readdir( args.geo = dp->i_mount->m_dir_geo; args.trans = tp; - if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) rval = xfs_dir2_sf_getdents(&args, ctx); else if ((rval = xfs_dir2_isblock(&args, &v))) ; diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index af2c8e5ceea0..d5b7f03e93c8 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -75,7 +75,7 @@ xfs_qm_adjust_dqlimits( int prealloc = 0; ASSERT(d->d_id); - defq = xfs_get_defquota(dq, q); + defq = xfs_get_defquota(q, xfs_dquot_type(dq)); if (defq->bsoftlimit && !d->d_blk_softlimit) { d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit); @@ -114,9 +114,14 @@ xfs_qm_adjust_dqlimits( void xfs_qm_adjust_dqtimers( struct xfs_mount *mp, - struct xfs_disk_dquot *d) + struct xfs_dquot *dq) { + struct xfs_quotainfo *qi = mp->m_quotainfo; + struct xfs_disk_dquot *d = &dq->q_core; + struct xfs_def_quota *defq; + ASSERT(d->d_id); + defq = xfs_get_defquota(qi, xfs_dquot_type(dq)); #ifdef DEBUG 
if (d->d_blk_hardlimit) @@ -138,7 +143,7 @@ xfs_qm_adjust_dqtimers( (be64_to_cpu(d->d_bcount) > be64_to_cpu(d->d_blk_hardlimit)))) { d->d_btimer = cpu_to_be32(ktime_get_real_seconds() + - mp->m_quotainfo->qi_btimelimit); + defq->btimelimit); } else { d->d_bwarns = 0; } @@ -161,7 +166,7 @@ xfs_qm_adjust_dqtimers( (be64_to_cpu(d->d_icount) > be64_to_cpu(d->d_ino_hardlimit)))) { d->d_itimer = cpu_to_be32(ktime_get_real_seconds() + - mp->m_quotainfo->qi_itimelimit); + defq->itimelimit); } else { d->d_iwarns = 0; } @@ -184,7 +189,7 @@ xfs_qm_adjust_dqtimers( (be64_to_cpu(d->d_rtbcount) > be64_to_cpu(d->d_rtb_hardlimit)))) { d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() + - mp->m_quotainfo->qi_rtbtimelimit); + defq->rtbtimelimit); } else { d->d_rtbwarns = 0; } @@ -205,16 +210,18 @@ xfs_qm_adjust_dqtimers( */ STATIC void xfs_qm_init_dquot_blk( - xfs_trans_t *tp, - xfs_mount_t *mp, - xfs_dqid_t id, - uint type, - xfs_buf_t *bp) + struct xfs_trans *tp, + struct xfs_mount *mp, + xfs_dqid_t id, + uint type, + struct xfs_buf *bp) { struct xfs_quotainfo *q = mp->m_quotainfo; - xfs_dqblk_t *d; - xfs_dqid_t curid; - int i; + struct xfs_dqblk *d; + xfs_dqid_t curid; + unsigned int qflag; + unsigned int blftype; + int i; ASSERT(tp); ASSERT(xfs_buf_islocked(bp)); @@ -238,11 +245,39 @@ xfs_qm_init_dquot_blk( } } - xfs_trans_dquot_buf(tp, bp, - (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF : - ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF : - XFS_BLF_GDQUOT_BUF))); - xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); + if (type & XFS_DQ_USER) { + qflag = XFS_UQUOTA_CHKD; + blftype = XFS_BLF_UDQUOT_BUF; + } else if (type & XFS_DQ_PROJ) { + qflag = XFS_PQUOTA_CHKD; + blftype = XFS_BLF_PDQUOT_BUF; + } else { + qflag = XFS_GQUOTA_CHKD; + blftype = XFS_BLF_GDQUOT_BUF; + } + + xfs_trans_dquot_buf(tp, bp, blftype); + + /* + * quotacheck uses delayed writes to update all the dquots on disk in an + * efficient manner instead of logging the individual dquot changes as + * they are made. However if we log the buffer allocated here and crash + * after quotacheck while the logged initialisation is still in the + * active region of the log, log recovery can replay the dquot buffer + * initialisation over the top of the checked dquots and corrupt quota + * accounting. + * + * To avoid this problem, quotacheck cannot log the initialised buffer. + * We must still dirty the buffer and write it back before the + * allocation transaction clears the log. Therefore, mark the buffer as + * ordered instead of logging it directly. This is safe for quotacheck + * because it detects and repairs allocated but initialized dquot blocks + * in the quota inodes. + */ + if (!(mp->m_qflags & qflag)) + xfs_trans_ordered_buf(tp, bp); + else + xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); } /* @@ -1021,6 +1056,7 @@ xfs_qm_dqflush_done( struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip; struct xfs_dquot *dqp = qip->qli_dquot; struct xfs_ail *ailp = lip->li_ailp; + xfs_lsn_t tail_lsn; /* * We only want to pull the item from the AIL if its @@ -1034,10 +1070,11 @@ xfs_qm_dqflush_done( ((lip->li_lsn == qip->qli_flush_lsn) || test_bit(XFS_LI_FAILED, &lip->li_flags))) { - /* xfs_trans_ail_delete() drops the AIL lock. 
*/ spin_lock(&ailp->ail_lock); if (lip->li_lsn == qip->qli_flush_lsn) { - xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE); + /* xfs_ail_update_finish() drops the AIL lock */ + tail_lsn = xfs_ail_delete_one(ailp, lip); + xfs_ail_update_finish(ailp, tail_lsn); } else { /* * Clear the failed state since we are about to drop the @@ -1068,6 +1105,7 @@ xfs_qm_dqflush( struct xfs_buf **bpp) { struct xfs_mount *mp = dqp->q_mount; + struct xfs_log_item *lip = &dqp->q_logitem.qli_item; struct xfs_buf *bp; struct xfs_dqblk *dqb; struct xfs_disk_dquot *ddqp; @@ -1084,31 +1122,15 @@ xfs_qm_dqflush( xfs_qm_dqunpin_wait(dqp); /* - * This may have been unpinned because the filesystem is shutting - * down forcibly. If that's the case we must not write this dquot - * to disk, because the log record didn't make it to disk. - * - * We also have to remove the log item from the AIL in this case, - * as we wait for an emptry AIL as part of the unmount process. - */ - if (XFS_FORCED_SHUTDOWN(mp)) { - struct xfs_log_item *lip = &dqp->q_logitem.qli_item; - dqp->dq_flags &= ~XFS_DQ_DIRTY; - - xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE); - - error = -EIO; - goto out_unlock; - } - - /* * Get the buffer containing the on-disk dquot */ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK, &bp, &xfs_dquot_buf_ops); - if (error) + if (error == -EAGAIN) goto out_unlock; + if (error) + goto out_abort; /* * Calculate the location of the dquot inside the buffer. @@ -1116,17 +1138,15 @@ xfs_qm_dqflush( dqb = bp->b_addr + dqp->q_bufoffset; ddqp = &dqb->dd_diskdq; - /* - * A simple sanity check in case we got a corrupted dquot. - */ - fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0); + /* sanity check the in-core structure before we flush */ + fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id), + 0); if (fa) { xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", - be32_to_cpu(ddqp->d_id), fa); + be32_to_cpu(dqp->q_core.d_id), fa); xfs_buf_relse(bp); - xfs_dqfunlock(dqp); - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - return -EFSCORRUPTED; + error = -EFSCORRUPTED; + goto out_abort; } /* This is the only portion of data that needs to persist */ @@ -1175,6 +1195,10 @@ xfs_qm_dqflush( *bpp = bp; return 0; +out_abort: + dqp->dq_flags &= ~XFS_DQ_DIRTY; + xfs_trans_ail_delete(lip, 0); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); out_unlock: xfs_dqfunlock(dqp); return error; diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index fe3e46df604b..71e36c85e20b 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -154,7 +154,7 @@ void xfs_qm_dqdestroy(struct xfs_dquot *dqp); int xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp); void xfs_qm_dqunpin_wait(struct xfs_dquot *dqp); void xfs_qm_adjust_dqtimers(struct xfs_mount *mp, - struct xfs_disk_dquot *d); + struct xfs_dquot *d); void xfs_qm_adjust_dqlimits(struct xfs_mount *mp, struct xfs_dquot *d); xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type); diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index baad1748d0d1..349c92d26570 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -145,21 +145,6 @@ xfs_qm_dquot_logitem_push( if (atomic_read(&dqp->q_pincount) > 0) return XFS_ITEM_PINNED; - /* - * The buffer containing this item failed to be written back - * previously. 
Resubmit the buffer for IO - */ - if (test_bit(XFS_LI_FAILED, &lip->li_flags)) { - if (!xfs_buf_trylock(bp)) - return XFS_ITEM_LOCKED; - - if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list)) - rval = XFS_ITEM_FLUSHING; - - xfs_buf_unlock(bp); - return rval; - } - if (!xfs_dqlock_nowait(dqp)) return XFS_ITEM_LOCKED; @@ -358,7 +343,7 @@ xfs_qm_qoff_logitem_relse( ASSERT(test_bit(XFS_LI_IN_AIL, &lip->li_flags) || test_bit(XFS_LI_ABORTED, &lip->li_flags) || XFS_FORCED_SHUTDOWN(lip->li_mountp)); - xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(lip, 0); kmem_free(lip->li_lv_shadow); kmem_free(qoff); } diff --git a/fs/xfs/xfs_dquot_item_recover.c b/fs/xfs/xfs_dquot_item_recover.c new file mode 100644 index 000000000000..3400be4c88f0 --- /dev/null +++ b/fs/xfs/xfs_dquot_item_recover.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_trans_priv.h" +#include "xfs_qm.h" +#include "xfs_log.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" + +STATIC void +xlog_recover_dquot_ra_pass2( + struct xlog *log, + struct xlog_recover_item *item) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_disk_dquot *recddq; + struct xfs_dq_logformat *dq_f; + uint type; + + if (mp->m_qflags == 0) + return; + + recddq = item->ri_buf[1].i_addr; + if (recddq == NULL) + return; + if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) + return; + + type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); + ASSERT(type); + if (log->l_quotaoffs_flag & type) + return; + + dq_f = item->ri_buf[0].i_addr; + ASSERT(dq_f); + ASSERT(dq_f->qlf_len == 1); + + xlog_buf_readahead(log, dq_f->qlf_blkno, + XFS_FSB_TO_BB(mp, dq_f->qlf_len), + &xfs_dquot_buf_ra_ops); +} + +/* + * Recover a dquot record + */ +STATIC int +xlog_recover_dquot_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t current_lsn) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_buf *bp; + struct xfs_disk_dquot *ddq, *recddq; + struct xfs_dq_logformat *dq_f; + xfs_failaddr_t fa; + int error; + uint type; + + /* + * Filesystems are required to send in quota flags at mount time. + */ + if (mp->m_qflags == 0) + return 0; + + recddq = item->ri_buf[1].i_addr; + if (recddq == NULL) { + xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); + return -EFSCORRUPTED; + } + if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) { + xfs_alert(log->l_mp, "dquot too small (%d) in %s.", + item->ri_buf[1].i_len, __func__); + return -EFSCORRUPTED; + } + + /* + * This type of quotas was turned off, so ignore this record. + */ + type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); + ASSERT(type); + if (log->l_quotaoffs_flag & type) + return 0; + + /* + * At this point we know that quota was _not_ turned off. + * Since the mount flags are not indicating to us otherwise, this + * must mean that quota is on, and the dquot needs to be replayed. + * Remember that we may not have fully recovered the superblock yet, + * so we can't do the usual trick of looking at the SB quota bits. 
+ * + * The other possibility, of course, is that the quota subsystem was + * removed since the last mount - ENOSYS. + */ + dq_f = item->ri_buf[0].i_addr; + ASSERT(dq_f); + fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0); + if (fa) { + xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS", + dq_f->qlf_id, fa); + return -EFSCORRUPTED; + } + ASSERT(dq_f->qlf_len == 1); + + /* + * At this point we are assuming that the dquots have been allocated + * and hence the buffer has valid dquots stamped in it. It should, + * therefore, pass verifier validation. If the dquot is bad, then the + * we'll return an error here, so we don't need to specifically check + * the dquot in the buffer after the verifier has run. + */ + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, + XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, + &xfs_dquot_buf_ops); + if (error) + return error; + + ASSERT(bp); + ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); + + /* + * If the dquot has an LSN in it, recover the dquot only if it's less + * than the lsn of the transaction we are replaying. + */ + if (xfs_sb_version_hascrc(&mp->m_sb)) { + struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; + xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); + + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { + goto out_release; + } + } + + memcpy(ddq, recddq, item->ri_buf[1].i_len); + if (xfs_sb_version_hascrc(&mp->m_sb)) { + xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), + XFS_DQUOT_CRC_OFF); + } + + ASSERT(dq_f->qlf_size == 2); + ASSERT(bp->b_mount == mp); + bp->b_iodone = xlog_recover_iodone; + xfs_buf_delwri_queue(bp, buffer_list); + +out_release: + xfs_buf_relse(bp); + return 0; +} + +const struct xlog_recover_item_ops xlog_dquot_item_ops = { + .item_type = XFS_LI_DQUOT, + .ra_pass2 = xlog_recover_dquot_ra_pass2, + .commit_pass2 = xlog_recover_dquot_commit_pass2, +}; + +/* + * Recover QUOTAOFF records. We simply make a note of it in the xlog + * structure, so that we know not to do any dquot item or dquot buffer recovery, + * of that type. + */ +STATIC int +xlog_recover_quotaoff_commit_pass1( + struct xlog *log, + struct xlog_recover_item *item) +{ + struct xfs_qoff_logformat *qoff_f = item->ri_buf[0].i_addr; + ASSERT(qoff_f); + + /* + * The logitem format's flag tells us if this was user quotaoff, + * group/project quotaoff or both. 
+ */ + if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_USER; + if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_PROJ; + if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_GROUP; + + return 0; +} + +const struct xlog_recover_item_ops xlog_quotaoff_item_ops = { + .item_type = XFS_LI_QUOTAOFF, + .commit_pass1 = xlog_recover_quotaoff_commit_pass1, + /* nothing to commit in pass2 */ +}; diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index a21e9cc6516a..7f6e20899473 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -53,6 +53,7 @@ static unsigned int xfs_errortag_random_default[] = { XFS_RANDOM_FORCE_SCRUB_REPAIR, XFS_RANDOM_FORCE_SUMMARY_RECALC, XFS_RANDOM_IUNLINK_FALLBACK, + XFS_RANDOM_BUF_IOERROR, }; struct xfs_errortag_attr { @@ -162,6 +163,7 @@ XFS_ERRORTAG_ATTR_RW(buf_lru_ref, XFS_ERRTAG_BUF_LRU_REF); XFS_ERRORTAG_ATTR_RW(force_repair, XFS_ERRTAG_FORCE_SCRUB_REPAIR); XFS_ERRORTAG_ATTR_RW(bad_summary, XFS_ERRTAG_FORCE_SUMMARY_RECALC); XFS_ERRORTAG_ATTR_RW(iunlink_fallback, XFS_ERRTAG_IUNLINK_FALLBACK); +XFS_ERRORTAG_ATTR_RW(buf_ioerror, XFS_ERRTAG_BUF_IOERROR); static struct attribute *xfs_errortag_attrs[] = { XFS_ERRORTAG_ATTR_LIST(noerror), @@ -199,6 +201,7 @@ static struct attribute *xfs_errortag_attrs[] = { XFS_ERRORTAG_ATTR_LIST(force_repair), XFS_ERRORTAG_ATTR_LIST(bad_summary), XFS_ERRORTAG_ATTR_LIST(iunlink_fallback), + XFS_ERRORTAG_ATTR_LIST(buf_ioerror), NULL, }; diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 6ea847f6e298..b9c333bae0a1 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -22,16 +22,20 @@ #include "xfs_bmap.h" #include "xfs_trace.h" #include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" kmem_zone_t *xfs_efi_zone; kmem_zone_t *xfs_efd_zone; +static const struct xfs_item_ops xfs_efi_item_ops; + static inline struct xfs_efi_log_item *EFI_ITEM(struct xfs_log_item *lip) { return container_of(lip, struct xfs_efi_log_item, efi_item); } -void +STATIC void xfs_efi_item_free( struct xfs_efi_log_item *efip) { @@ -49,13 +53,13 @@ xfs_efi_item_free( * committed vs unpin operations in bulk insert operations. Hence the reference * count to ensure only the last caller frees the EFI. */ -void +STATIC void xfs_efi_release( struct xfs_efi_log_item *efip) { ASSERT(atomic_read(&efip->efi_refcount) > 0); if (atomic_dec_and_test(&efip->efi_refcount)) { - xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR); xfs_efi_item_free(efip); } } @@ -139,18 +143,10 @@ xfs_efi_item_release( xfs_efi_release(EFI_ITEM(lip)); } -static const struct xfs_item_ops xfs_efi_item_ops = { - .iop_size = xfs_efi_item_size, - .iop_format = xfs_efi_item_format, - .iop_unpin = xfs_efi_item_unpin, - .iop_release = xfs_efi_item_release, -}; - - /* * Allocate and initialize an efi item with the given number of extents. */ -struct xfs_efi_log_item * +STATIC struct xfs_efi_log_item * xfs_efi_init( struct xfs_mount *mp, uint nextents) @@ -161,7 +157,7 @@ xfs_efi_init( ASSERT(nextents > 0); if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { - size = (uint)(sizeof(xfs_efi_log_item_t) + + size = (uint)(sizeof(struct xfs_efi_log_item) + ((nextents - 1) * sizeof(xfs_extent_t))); efip = kmem_zalloc(size, 0); } else { @@ -184,7 +180,7 @@ xfs_efi_init( * one of which will be the native format for this kernel. * It will handle the conversion of formats if necessary. 
*/ -int +STATIC int xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) { xfs_efi_log_format_t *src_efi_fmt = buf->i_addr; @@ -412,41 +408,16 @@ xfs_extent_free_diff_items( XFS_FSB_TO_AGNO(mp, rb->xefi_startblock); } -/* Get an EFI. */ -STATIC void * -xfs_extent_free_create_intent( - struct xfs_trans *tp, - unsigned int count) -{ - struct xfs_efi_log_item *efip; - - ASSERT(tp != NULL); - ASSERT(count > 0); - - efip = xfs_efi_init(tp->t_mountp, count); - ASSERT(efip != NULL); - - /* - * Get a log_item_desc to point at the new item. - */ - xfs_trans_add_item(tp, &efip->efi_item); - return efip; -} - /* Log a free extent to the intent item. */ STATIC void xfs_extent_free_log_item( struct xfs_trans *tp, - void *intent, - struct list_head *item) + struct xfs_efi_log_item *efip, + struct xfs_extent_free_item *free) { - struct xfs_efi_log_item *efip = intent; - struct xfs_extent_free_item *free; uint next_extent; struct xfs_extent *extp; - free = container_of(item, struct xfs_extent_free_item, xefi_list); - tp->t_flags |= XFS_TRANS_DIRTY; set_bit(XFS_LI_DIRTY, &efip->efi_item.li_flags); @@ -462,29 +433,50 @@ xfs_extent_free_log_item( extp->ext_len = free->xefi_blockcount; } +static struct xfs_log_item * +xfs_extent_free_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_efi_log_item *efip = xfs_efi_init(mp, count); + struct xfs_extent_free_item *free; + + ASSERT(count > 0); + + xfs_trans_add_item(tp, &efip->efi_item); + if (sort) + list_sort(mp, items, xfs_extent_free_diff_items); + list_for_each_entry(free, items, xefi_list) + xfs_extent_free_log_item(tp, efip, free); + return &efip->efi_item; +} + /* Get an EFD so we can process all the free extents. */ -STATIC void * +static struct xfs_log_item * xfs_extent_free_create_done( struct xfs_trans *tp, - void *intent, + struct xfs_log_item *intent, unsigned int count) { - return xfs_trans_get_efd(tp, intent, count); + return &xfs_trans_get_efd(tp, EFI_ITEM(intent), count)->efd_item; } /* Process a free extent. */ STATIC int xfs_extent_free_finish_item( struct xfs_trans *tp, + struct xfs_log_item *done, struct list_head *item, - void *done_item, - void **state) + struct xfs_btree_cur **state) { struct xfs_extent_free_item *free; int error; free = container_of(item, struct xfs_extent_free_item, xefi_list); - error = xfs_trans_free_extent(tp, done_item, + error = xfs_trans_free_extent(tp, EFD_ITEM(done), free->xefi_startblock, free->xefi_blockcount, &free->xefi_oinfo, free->xefi_skip_discard); @@ -495,9 +487,9 @@ xfs_extent_free_finish_item( /* Abort all pending EFIs. */ STATIC void xfs_extent_free_abort_intent( - void *intent) + struct xfs_log_item *intent) { - xfs_efi_release(intent); + xfs_efi_release(EFI_ITEM(intent)); } /* Cancel a free extent. 
*/ @@ -513,10 +505,8 @@ xfs_extent_free_cancel_item( const struct xfs_defer_op_type xfs_extent_free_defer_type = { .max_items = XFS_EFI_MAX_FAST_EXTENTS, - .diff_items = xfs_extent_free_diff_items, .create_intent = xfs_extent_free_create_intent, .abort_intent = xfs_extent_free_abort_intent, - .log_item = xfs_extent_free_log_item, .create_done = xfs_extent_free_create_done, .finish_item = xfs_extent_free_finish_item, .cancel_item = xfs_extent_free_cancel_item, @@ -529,12 +519,12 @@ const struct xfs_defer_op_type xfs_extent_free_defer_type = { STATIC int xfs_agfl_free_finish_item( struct xfs_trans *tp, + struct xfs_log_item *done, struct list_head *item, - void *done_item, - void **state) + struct xfs_btree_cur **state) { struct xfs_mount *mp = tp->t_mountp; - struct xfs_efd_log_item *efdp = done_item; + struct xfs_efd_log_item *efdp = EFD_ITEM(done); struct xfs_extent_free_item *free; struct xfs_extent *extp; struct xfs_buf *agbp; @@ -579,10 +569,8 @@ xfs_agfl_free_finish_item( /* sub-type with special handling for AGFL deferred frees */ const struct xfs_defer_op_type xfs_agfl_free_defer_type = { .max_items = XFS_EFI_MAX_FAST_EXTENTS, - .diff_items = xfs_extent_free_diff_items, .create_intent = xfs_extent_free_create_intent, .abort_intent = xfs_extent_free_abort_intent, - .log_item = xfs_extent_free_log_item, .create_done = xfs_extent_free_create_done, .finish_item = xfs_agfl_free_finish_item, .cancel_item = xfs_extent_free_cancel_item, @@ -592,19 +580,19 @@ const struct xfs_defer_op_type xfs_agfl_free_defer_type = { * Process an extent free intent item that was recovered from * the log. We need to free the extents that it describes. */ -int -xfs_efi_recover( - struct xfs_mount *mp, - struct xfs_efi_log_item *efip) +STATIC int +xfs_efi_item_recover( + struct xfs_log_item *lip, + struct xfs_trans *parent_tp) { - struct xfs_efd_log_item *efdp; - struct xfs_trans *tp; - int i; - int error = 0; - xfs_extent_t *extp; - xfs_fsblock_t startblock_fsb; - - ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)); + struct xfs_efi_log_item *efip = EFI_ITEM(lip); + struct xfs_mount *mp = parent_tp->t_mountp; + struct xfs_efd_log_item *efdp; + struct xfs_trans *tp; + struct xfs_extent *extp; + xfs_fsblock_t startblock_fsb; + int i; + int error = 0; /* * First check the validity of the extents described by the @@ -623,7 +611,6 @@ xfs_efi_recover( * This will pull the EFI from the AIL and * free the memory associated with it. */ - set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); xfs_efi_release(efip); return -EFSCORRUPTED; } @@ -644,7 +631,6 @@ xfs_efi_recover( } - set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); error = xfs_trans_commit(tp); return error; @@ -652,3 +638,93 @@ abort_error: xfs_trans_cancel(tp); return error; } + +STATIC bool +xfs_efi_item_match( + struct xfs_log_item *lip, + uint64_t intent_id) +{ + return EFI_ITEM(lip)->efi_format.efi_id == intent_id; +} + +static const struct xfs_item_ops xfs_efi_item_ops = { + .iop_size = xfs_efi_item_size, + .iop_format = xfs_efi_item_format, + .iop_unpin = xfs_efi_item_unpin, + .iop_release = xfs_efi_item_release, + .iop_recover = xfs_efi_item_recover, + .iop_match = xfs_efi_item_match, +}; + +/* + * This routine is called to create an in-core extent free intent + * item from the efi format structure which was logged on disk. + * It allocates an in-core efi, copies the extents from the format + * structure into it, and adds the efi to the AIL with the given + * LSN. 
+ */ +STATIC int +xlog_recover_efi_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_efi_log_item *efip; + struct xfs_efi_log_format *efi_formatp; + int error; + + efi_formatp = item->ri_buf[0].i_addr; + + efip = xfs_efi_init(mp, efi_formatp->efi_nextents); + error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format); + if (error) { + xfs_efi_item_free(efip); + return error; + } + atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); + /* + * Insert the intent into the AIL directly and drop one reference so + * that finishing or canceling the work will drop the other. + */ + xfs_trans_ail_insert(log->l_ailp, &efip->efi_item, lsn); + xfs_efi_release(efip); + return 0; +} + +const struct xlog_recover_item_ops xlog_efi_item_ops = { + .item_type = XFS_LI_EFI, + .commit_pass2 = xlog_recover_efi_commit_pass2, +}; + +/* + * This routine is called when an EFD format structure is found in a committed + * transaction in the log. Its purpose is to cancel the corresponding EFI if it + * was still in the log. To do this it searches the AIL for the EFI with an id + * equal to that in the EFD format structure. If we find it we drop the EFD + * reference, which removes the EFI from the AIL and frees it. + */ +STATIC int +xlog_recover_efd_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_efd_log_format *efd_formatp; + + efd_formatp = item->ri_buf[0].i_addr; + ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + + ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || + (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + + ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); + + xlog_recover_release_intent(log, XFS_LI_EFI, efd_formatp->efd_efi_id); + return 0; +} + +const struct xlog_recover_item_ops xlog_efd_item_ops = { + .item_type = XFS_LI_EFD, + .commit_pass2 = xlog_recover_efd_commit_pass2, +}; diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h index 16aaab06d4ec..cd2860c875bf 100644 --- a/fs/xfs/xfs_extfree_item.h +++ b/fs/xfs/xfs_extfree_item.h @@ -17,11 +17,6 @@ struct kmem_zone; #define XFS_EFI_MAX_FAST_EXTENTS 16 /* - * Define EFI flag bits. Manipulated by set/clear/test_bit operators. - */ -#define XFS_EFI_RECOVERED 1 - -/* * This is the "extent free intention" log item. It is used to log the fact * that some extents need to be free. It is used in conjunction with the * "extent free done" log item described below. @@ -50,25 +45,24 @@ struct kmem_zone; * of commit failure or log I/O errors. Note that the EFD is not inserted in the * AIL, so at this point both the EFI and EFD are freed. */ -typedef struct xfs_efi_log_item { +struct xfs_efi_log_item { struct xfs_log_item efi_item; atomic_t efi_refcount; atomic_t efi_next_extent; - unsigned long efi_flags; /* misc flags */ xfs_efi_log_format_t efi_format; -} xfs_efi_log_item_t; +}; /* * This is the "extent free done" log item. It is used to log * the fact that some extents earlier mentioned in an efi item * have been freed. */ -typedef struct xfs_efd_log_item { +struct xfs_efd_log_item { struct xfs_log_item efd_item; - xfs_efi_log_item_t *efd_efip; + struct xfs_efi_log_item *efd_efip; uint efd_next_extent; xfs_efd_log_format_t efd_format; -} xfs_efd_log_item_t; +}; /* * Max number of extents in fast allocation path. 
@@ -78,13 +72,4 @@ typedef struct xfs_efd_log_item { extern struct kmem_zone *xfs_efi_zone; extern struct kmem_zone *xfs_efd_zone; -xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint); -int xfs_efi_copy_format(xfs_log_iovec_t *buf, - xfs_efi_log_format_t *dst_efi_fmt); -void xfs_efi_item_free(xfs_efi_log_item_t *); -void xfs_efi_release(struct xfs_efi_log_item *); - -int xfs_efi_recover(struct xfs_mount *mp, - struct xfs_efi_log_item *efip); - #endif /* __XFS_EXTFREE_ITEM_H__ */ diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 4b8bdecc3863..403c90309a8f 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1102,7 +1102,7 @@ xfs_dir_open( * certain to have the next operation be a read there. */ mode = xfs_ilock_data_map_shared(ip); - if (ip->i_d.di_nextents > 0) + if (ip->i_df.if_nextents > 0) error = xfs_dir3_data_readahead(ip, 0, 0); xfs_iunlock(ip, mode); return error; diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 3e61d0cc23f8..ef1d5bb88b93 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -504,10 +504,7 @@ xfs_do_force_shutdown( } else if (logerror) { xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR, "Log I/O Error Detected. Shutting down filesystem"); - } else if (flags & SHUTDOWN_DEVICE_REQ) { - xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR, - "All device paths lost. Shutting down filesystem"); - } else if (!(flags & SHUTDOWN_REMOTE_REQ)) { + } else { xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR, "I/O Error Detected. Shutting down filesystem"); } diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 8bf1d15be3f6..5daef654956c 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -22,6 +22,7 @@ #include "xfs_dquot_item.h" #include "xfs_dquot.h" #include "xfs_reflink.h" +#include "xfs_ialloc.h" #include <linux/iversion.h> @@ -62,8 +63,6 @@ xfs_inode_alloc( memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); ip->i_afp = NULL; ip->i_cowfp = NULL; - ip->i_cnextents = 0; - ip->i_cformat = XFS_DINODE_FMT_EXTENTS; memset(&ip->i_df, 0, sizeof(ip->i_df)); ip->i_flags = 0; ip->i_delayed_blks = 0; @@ -88,15 +87,18 @@ xfs_inode_free_callback( case S_IFREG: case S_IFDIR: case S_IFLNK: - xfs_idestroy_fork(ip, XFS_DATA_FORK); + xfs_idestroy_fork(&ip->i_df); break; } - if (ip->i_afp) - xfs_idestroy_fork(ip, XFS_ATTR_FORK); - if (ip->i_cowfp) - xfs_idestroy_fork(ip, XFS_COW_FORK); - + if (ip->i_afp) { + xfs_idestroy_fork(ip->i_afp); + kmem_cache_free(xfs_ifork_zone, ip->i_afp); + } + if (ip->i_cowfp) { + xfs_idestroy_fork(ip->i_cowfp); + kmem_cache_free(xfs_ifork_zone, ip->i_cowfp); + } if (ip->i_itemp) { ASSERT(!test_bit(XFS_LI_IN_AIL, &ip->i_itemp->ili_item.li_flags)); @@ -423,6 +425,7 @@ xfs_iget_cache_hit( spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); + ASSERT(!rwsem_is_locked(&inode->i_rwsem)); error = xfs_reinit_inode(mp, inode); if (error) { bool wake; @@ -456,9 +459,6 @@ xfs_iget_cache_hit( ip->i_sick = 0; ip->i_checked = 0; - ASSERT(!rwsem_is_locked(&inode->i_rwsem)); - init_rwsem(&inode->i_rwsem); - spin_unlock(&ip->i_flags_lock); spin_unlock(&pag->pag_ici_lock); } else { @@ -479,7 +479,7 @@ xfs_iget_cache_hit( xfs_ilock(ip, lock_flags); if (!(flags & XFS_IGET_INCORE)) - xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE); + xfs_iflags_clear(ip, XFS_ISTALE); XFS_STATS_INC(mp, xs_ig_found); return 0; @@ -510,18 +510,42 @@ xfs_iget_cache_miss( if (!ip) return -ENOMEM; - error = xfs_iread(mp, tp, ip, flags); + error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags); if (error) goto out_destroy; - if (!xfs_inode_verify_forks(ip)) { - error = 
-EFSCORRUPTED; - goto out_destroy; + /* + * For version 5 superblocks, if we are initialising a new inode and we + * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can + * simply build the new inode core with a random generation number. + * + * For version 4 (and older) superblocks, log recovery is dependent on + * the di_flushiter field being initialised from the current on-disk + * value and hence we must also read the inode off disk even when + * initializing new inodes. + */ + if (xfs_sb_version_has_v3inode(&mp->m_sb) && + (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) { + VFS_I(ip)->i_generation = prandom_u32(); + } else { + struct xfs_dinode *dip; + struct xfs_buf *bp; + + error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0); + if (error) + goto out_destroy; + + error = xfs_inode_from_disk(ip, dip); + if (!error) + xfs_buf_set_ref(bp, XFS_INO_REF); + xfs_trans_brelse(tp, bp); + + if (error) + goto out_destroy; } trace_xfs_iget_miss(ip); - /* * Check the inode free state is valid. This also detects lookup * racing with unlinks. @@ -561,7 +585,7 @@ xfs_iget_cache_miss( */ iflags = XFS_INEW; if (flags & XFS_IGET_DONTCACHE) - iflags |= XFS_IDONTCACHE; + d_mark_dontcache(VFS_I(ip)); ip->i_udquot = NULL; ip->i_gdquot = NULL; ip->i_pdquot = NULL; @@ -737,13 +761,18 @@ xfs_icache_inode_is_allocated( */ #define XFS_LOOKUP_BATCH 32 -STATIC int -xfs_inode_ag_walk_grab( +/* + * Decide if the given @ip is eligible to be a part of the inode walk, and + * grab it if so. Returns true if it's ready to go or false if we should just + * ignore it. + */ +STATIC bool +xfs_inode_walk_ag_grab( struct xfs_inode *ip, int flags) { struct inode *inode = VFS_I(ip); - bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); + bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT); ASSERT(rcu_read_lock_held()); @@ -768,39 +797,41 @@ xfs_inode_ag_walk_grab( /* nothing to sync during shutdown */ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return -EFSCORRUPTED; + return false; /* If we can't grab the inode, it must on it's way to reclaim. */ if (!igrab(inode)) - return -ENOENT; + return false; /* inode is valid */ - return 0; + return true; out_unlock_noent: spin_unlock(&ip->i_flags_lock); - return -ENOENT; + return false; } +/* + * For a given per-AG structure @pag, grab, @execute, and rele all incore + * inodes with the given radix tree @tag. + */ STATIC int -xfs_inode_ag_walk( - struct xfs_mount *mp, +xfs_inode_walk_ag( struct xfs_perag *pag, - int (*execute)(struct xfs_inode *ip, int flags, - void *args), - int flags, + int iter_flags, + int (*execute)(struct xfs_inode *ip, void *args), void *args, - int tag, - int iter_flags) + int tag) { + struct xfs_mount *mp = pag->pag_mount; uint32_t first_index; int last_error = 0; int skipped; - int done; + bool done; int nr_found; restart: - done = 0; + done = false; skipped = 0; first_index = 0; nr_found = 0; @@ -811,7 +842,7 @@ restart: rcu_read_lock(); - if (tag == -1) + if (tag == XFS_ICI_NO_TAG) nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void **)batch, first_index, XFS_LOOKUP_BATCH); @@ -833,7 +864,7 @@ restart: for (i = 0; i < nr_found; i++) { struct xfs_inode *ip = batch[i]; - if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) + if (done || !xfs_inode_walk_ag_grab(ip, iter_flags)) batch[i] = NULL; /* @@ -852,7 +883,7 @@ restart: continue; first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) - done = 1; + done = true; } /* unlock now we've grabbed the inodes. 
*/ @@ -861,10 +892,10 @@ restart: for (i = 0; i < nr_found; i++) { if (!batch[i]) continue; - if ((iter_flags & XFS_AGITER_INEW_WAIT) && + if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) && xfs_iflags_test(batch[i], XFS_INEW)) xfs_inew_wait(batch[i]); - error = execute(batch[i], flags, args); + error = execute(batch[i], args); xfs_irele(batch[i]); if (error == -EAGAIN) { skipped++; @@ -889,6 +920,49 @@ restart: return last_error; } +/* Fetch the next (possibly tagged) per-AG structure. */ +static inline struct xfs_perag * +xfs_inode_walk_get_perag( + struct xfs_mount *mp, + xfs_agnumber_t agno, + int tag) +{ + if (tag == XFS_ICI_NO_TAG) + return xfs_perag_get(mp, agno); + return xfs_perag_get_tag(mp, agno, tag); +} + +/* + * Call the @execute function on all incore inodes matching the radix tree + * @tag. + */ +int +xfs_inode_walk( + struct xfs_mount *mp, + int iter_flags, + int (*execute)(struct xfs_inode *ip, void *args), + void *args, + int tag) +{ + struct xfs_perag *pag; + int error = 0; + int last_error = 0; + xfs_agnumber_t ag; + + ag = 0; + while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) { + ag = pag->pag_agno + 1; + error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag); + xfs_perag_put(pag); + if (error) { + last_error = error; + if (error == -EFSCORRUPTED) + break; + } + } + return last_error; +} + /* * Background scanning to trim post-EOF preallocated space. This is queued * based on the 'speculative_prealloc_lifetime' tunable (5m by default). @@ -952,75 +1026,6 @@ xfs_cowblocks_worker( xfs_queue_cowblocks(mp); } -int -xfs_inode_ag_iterator_flags( - struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, int flags, - void *args), - int flags, - void *args, - int iter_flags) -{ - struct xfs_perag *pag; - int error = 0; - int last_error = 0; - xfs_agnumber_t ag; - - ag = 0; - while ((pag = xfs_perag_get(mp, ag))) { - ag = pag->pag_agno + 1; - error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, - iter_flags); - xfs_perag_put(pag); - if (error) { - last_error = error; - if (error == -EFSCORRUPTED) - break; - } - } - return last_error; -} - -int -xfs_inode_ag_iterator( - struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, int flags, - void *args), - int flags, - void *args) -{ - return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); -} - -int -xfs_inode_ag_iterator_tag( - struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, int flags, - void *args), - int flags, - void *args, - int tag) -{ - struct xfs_perag *pag; - int error = 0; - int last_error = 0; - xfs_agnumber_t ag; - - ag = 0; - while ((pag = xfs_perag_get_tag(mp, ag, tag))) { - ag = pag->pag_agno + 1; - error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, - 0); - xfs_perag_put(pag); - if (error) { - last_error = error; - if (error == -EFSCORRUPTED) - break; - } - } - return last_error; -} - /* * Grab the inode for reclaim exclusively. * Return 0 if we grabbed it, non-zero otherwise. 
@@ -1128,7 +1133,7 @@ restart: if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_iunpin_wait(ip); /* xfs_iflush_abort() drops the flush lock */ - xfs_iflush_abort(ip, false); + xfs_iflush_abort(ip); goto reclaim; } if (xfs_ipincount(ip)) { @@ -1419,59 +1424,90 @@ xfs_reclaim_inodes_count( return reclaimable; } -STATIC int +STATIC bool xfs_inode_match_id( struct xfs_inode *ip, struct xfs_eofblocks *eofb) { if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) - return 0; + return false; if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) - return 0; + return false; if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && ip->i_d.di_projid != eofb->eof_prid) - return 0; + return false; - return 1; + return true; } /* * A union-based inode filtering algorithm. Process the inode if any of the * criteria match. This is for global/internal scans only. */ -STATIC int +STATIC bool xfs_inode_match_id_union( struct xfs_inode *ip, struct xfs_eofblocks *eofb) { if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) - return 1; + return true; if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) - return 1; + return true; if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && ip->i_d.di_projid == eofb->eof_prid) - return 1; + return true; - return 0; + return false; +} + +/* + * Is this inode @ip eligible for eof/cow block reclamation, given some + * filtering parameters @eofb? The inode is eligible if @eofb is null or + * if the predicate functions match. + */ +static bool +xfs_inode_matches_eofb( + struct xfs_inode *ip, + struct xfs_eofblocks *eofb) +{ + bool match; + + if (!eofb) + return true; + + if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) + match = xfs_inode_match_id_union(ip, eofb); + else + match = xfs_inode_match_id(ip, eofb); + if (!match) + return false; + + /* skip the inode if the file size is too small */ + if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) && + XFS_ISIZE(ip) < eofb->eof_min_file_size) + return false; + + return true; } STATIC int xfs_inode_free_eofblocks( struct xfs_inode *ip, - int flags, void *args) { - int ret = 0; - struct xfs_eofblocks *eofb = args; - int match; + struct xfs_eofblocks *eofb = args; + bool wait; + int ret; + + wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC); if (!xfs_can_free_eofblocks(ip, false)) { /* inode could be preallocated or append-only */ @@ -1484,62 +1520,34 @@ xfs_inode_free_eofblocks( * If the mapping is dirty the operation can block and wait for some * time. Unless we are waiting, skip it. */ - if (!(flags & SYNC_WAIT) && - mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) + if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) return 0; - if (eofb) { - if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) - match = xfs_inode_match_id_union(ip, eofb); - else - match = xfs_inode_match_id(ip, eofb); - if (!match) - return 0; - - /* skip the inode if the file size is too small */ - if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && - XFS_ISIZE(ip) < eofb->eof_min_file_size) - return 0; - } + if (!xfs_inode_matches_eofb(ip, eofb)) + return 0; /* * If the caller is waiting, return -EAGAIN to keep the background * scanner moving and revisit the inode in a subsequent pass. 
*/ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { - if (flags & SYNC_WAIT) - ret = -EAGAIN; - return ret; + if (wait) + return -EAGAIN; + return 0; } + ret = xfs_free_eofblocks(ip); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return ret; } -static int -__xfs_icache_free_eofblocks( - struct xfs_mount *mp, - struct xfs_eofblocks *eofb, - int (*execute)(struct xfs_inode *ip, int flags, - void *args), - int tag) -{ - int flags = SYNC_TRYLOCK; - - if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC)) - flags = SYNC_WAIT; - - return xfs_inode_ag_iterator_tag(mp, execute, flags, - eofb, tag); -} - int xfs_icache_free_eofblocks( struct xfs_mount *mp, struct xfs_eofblocks *eofb) { - return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks, + return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb, XFS_ICI_EOFBLOCKS_TAG); } @@ -1756,29 +1764,16 @@ xfs_prep_free_cowblocks( STATIC int xfs_inode_free_cowblocks( struct xfs_inode *ip, - int flags, void *args) { struct xfs_eofblocks *eofb = args; - int match; int ret = 0; if (!xfs_prep_free_cowblocks(ip)) return 0; - if (eofb) { - if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) - match = xfs_inode_match_id_union(ip, eofb); - else - match = xfs_inode_match_id(ip, eofb); - if (!match) - return 0; - - /* skip the inode if the file size is too small */ - if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && - XFS_ISIZE(ip) < eofb->eof_min_file_size) - return 0; - } + if (!xfs_inode_matches_eofb(ip, eofb)) + return 0; /* Free the CoW blocks */ xfs_ilock(ip, XFS_IOLOCK_EXCL); @@ -1802,7 +1797,7 @@ xfs_icache_free_cowblocks( struct xfs_mount *mp, struct xfs_eofblocks *eofb) { - return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks, + return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb, XFS_ICI_COWBLOCKS_TAG); } diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index 48f1fd2bb6ad..93b54e7d55f0 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h @@ -24,7 +24,7 @@ struct xfs_eofblocks { * tags for inode radix tree */ #define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup - in xfs_inode_ag_iterator */ + in xfs_inode_walk */ #define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */ #define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */ #define XFS_ICI_COWBLOCKS_TAG 2 /* inode can have cow blocks to gc */ @@ -40,7 +40,7 @@ struct xfs_eofblocks { /* * flags for AG inode iterator */ -#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */ +#define XFS_INODE_WALK_INEW_WAIT 0x1 /* wait on new inodes */ int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino, uint flags, uint lock_flags, xfs_inode_t **ipp); @@ -71,50 +71,9 @@ int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip); void xfs_cowblocks_worker(struct work_struct *); void xfs_queue_cowblocks(struct xfs_mount *); -int xfs_inode_ag_iterator(struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, int flags, void *args), - int flags, void *args); -int xfs_inode_ag_iterator_flags(struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, int flags, void *args), - int flags, void *args, int iter_flags); -int xfs_inode_ag_iterator_tag(struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, int flags, void *args), - int flags, void *args, int tag); - -static inline int -xfs_fs_eofblocks_from_user( - struct xfs_fs_eofblocks *src, - struct xfs_eofblocks *dst) -{ - if (src->eof_version != XFS_EOFBLOCKS_VERSION) - return -EINVAL; - - if (src->eof_flags & ~XFS_EOF_FLAGS_VALID) - return -EINVAL; - - if (memchr_inv(&src->pad32, 0, 
sizeof(src->pad32)) || - memchr_inv(src->pad64, 0, sizeof(src->pad64))) - return -EINVAL; - - dst->eof_flags = src->eof_flags; - dst->eof_prid = src->eof_prid; - dst->eof_min_file_size = src->eof_min_file_size; - - dst->eof_uid = INVALID_UID; - if (src->eof_flags & XFS_EOF_FLAGS_UID) { - dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid); - if (!uid_valid(dst->eof_uid)) - return -EINVAL; - } - - dst->eof_gid = INVALID_GID; - if (src->eof_flags & XFS_EOF_FLAGS_GID) { - dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid); - if (!gid_valid(dst->eof_gid)) - return -EINVAL; - } - return 0; -} +int xfs_inode_walk(struct xfs_mount *mp, int iter_flags, + int (*execute)(struct xfs_inode *ip, void *args), + void *args, int tag); int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino, bool *inuse); diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c index 490fee22b878..287a9e5c7d75 100644 --- a/fs/xfs/xfs_icreate_item.c +++ b/fs/xfs/xfs_icreate_item.c @@ -6,11 +6,19 @@ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" +#include "xfs_format.h" #include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_icreate_item.h" #include "xfs_log.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" +#include "xfs_ialloc.h" +#include "xfs_trace.h" kmem_zone_t *xfs_icreate_zone; /* inode create item zone */ @@ -107,3 +115,147 @@ xfs_icreate_log( tp->t_flags |= XFS_TRANS_DIRTY; set_bit(XFS_LI_DIRTY, &icp->ic_item.li_flags); } + +static enum xlog_recover_reorder +xlog_recover_icreate_reorder( + struct xlog_recover_item *item) +{ + /* + * Inode allocation buffers must be replayed before subsequent inode + * items try to modify those buffers. ICREATE items are the logical + * equivalent of logging a newly initialized inode buffer, so recover + * these at the same time that we recover logged buffers. + */ + return XLOG_REORDER_BUFFER_LIST; +} + +/* + * This routine is called when an inode create format structure is found in a + * committed transaction in the log. It's purpose is to initialise the inodes + * being allocated on disk. This requires us to get inode cluster buffers that + * match the range to be initialised, stamped with inode templates and written + * by delayed write so that subsequent modifications will hit the cached buffer + * and only need writing out at the end of recovery. 
+ */ +STATIC int +xlog_recover_icreate_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_icreate_log *icl; + struct xfs_ino_geometry *igeo = M_IGEO(mp); + xfs_agnumber_t agno; + xfs_agblock_t agbno; + unsigned int count; + unsigned int isize; + xfs_agblock_t length; + int bb_per_cluster; + int cancel_count; + int nbufs; + int i; + + icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; + if (icl->icl_type != XFS_LI_ICREATE) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); + return -EINVAL; + } + + if (icl->icl_size != 1) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); + return -EINVAL; + } + + agno = be32_to_cpu(icl->icl_ag); + if (agno >= mp->m_sb.sb_agcount) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); + return -EINVAL; + } + agbno = be32_to_cpu(icl->icl_agbno); + if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); + return -EINVAL; + } + isize = be32_to_cpu(icl->icl_isize); + if (isize != mp->m_sb.sb_inodesize) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); + return -EINVAL; + } + count = be32_to_cpu(icl->icl_count); + if (!count) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); + return -EINVAL; + } + length = be32_to_cpu(icl->icl_length); + if (!length || length >= mp->m_sb.sb_agblocks) { + xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); + return -EINVAL; + } + + /* + * The inode chunk is either full or sparse and we only support + * m_ino_geo.ialloc_min_blks sized sparse allocations at this time. + */ + if (length != igeo->ialloc_blks && + length != igeo->ialloc_min_blks) { + xfs_warn(log->l_mp, + "%s: unsupported chunk length", __FUNCTION__); + return -EINVAL; + } + + /* verify inode count is consistent with extent length */ + if ((count >> mp->m_sb.sb_inopblog) != length) { + xfs_warn(log->l_mp, + "%s: inconsistent inode count and chunk length", + __FUNCTION__); + return -EINVAL; + } + + /* + * The icreate transaction can cover multiple cluster buffers and these + * buffers could have been freed and reused. Check the individual + * buffers for cancellation so we don't overwrite anything written after + * a cancellation. + */ + bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster); + nbufs = length / igeo->blocks_per_cluster; + for (i = 0, cancel_count = 0; i < nbufs; i++) { + xfs_daddr_t daddr; + + daddr = XFS_AGB_TO_DADDR(mp, agno, + agbno + i * igeo->blocks_per_cluster); + if (xlog_is_buffer_cancelled(log, daddr, bb_per_cluster)) + cancel_count++; + } + + /* + * We currently only use icreate for a single allocation at a time. This + * means we should expect either all or none of the buffers to be + * cancelled. Be conservative and skip replay if at least one buffer is + * cancelled, but warn the user that something is awry if the buffers + * are not consistent. + * + * XXX: This must be refined to only skip cancelled clusters once we use + * icreate for multiple chunk allocations. 
+ */ + ASSERT(!cancel_count || cancel_count == nbufs); + if (cancel_count) { + if (cancel_count != nbufs) + xfs_warn(mp, + "WARNING: partial inode chunk cancellation, skipped icreate."); + trace_xfs_log_recover_icreate_cancel(log, icl); + return 0; + } + + trace_xfs_log_recover_icreate_recover(log, icl); + return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno, + length, be32_to_cpu(icl->icl_gen)); +} + +const struct xlog_recover_item_ops xlog_icreate_item_ops = { + .item_type = XFS_LI_ICREATE, + .reorder = xlog_recover_icreate_reorder, + .commit_pass2 = xlog_recover_icreate_commit_pass2, +}; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d1772786af29..64f5f9a440ae 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -112,7 +112,7 @@ xfs_ilock_data_map_shared( { uint lock_mode = XFS_ILOCK_SHARED; - if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && + if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE && (ip->i_df.if_flags & XFS_IFEXTENTS) == 0) lock_mode = XFS_ILOCK_EXCL; xfs_ilock(ip, lock_mode); @@ -125,7 +125,8 @@ xfs_ilock_attr_map_shared( { uint lock_mode = XFS_ILOCK_SHARED; - if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE && + if (ip->i_afp && + ip->i_afp->if_format == XFS_DINODE_FMT_BTREE && (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0) lock_mode = XFS_ILOCK_EXCL; xfs_ilock(ip, lock_mode); @@ -825,7 +826,7 @@ xfs_ialloc( inode->i_mode &= ~S_ISGID; ip->i_d.di_size = 0; - ip->i_d.di_nextents = 0; + ip->i_df.if_nextents = 0; ASSERT(ip->i_d.di_nblocks == 0); tv = current_time(inode); @@ -851,7 +852,7 @@ xfs_ialloc( case S_IFCHR: case S_IFBLK: case S_IFSOCK: - ip->i_d.di_format = XFS_DINODE_FMT_DEV; + ip->i_df.if_format = XFS_DINODE_FMT_DEV; ip->i_df.if_flags = 0; flags |= XFS_ILOG_DEV; break; @@ -907,7 +908,7 @@ xfs_ialloc( } /* FALLTHROUGH */ case S_IFLNK: - ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; + ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; ip->i_df.if_flags = XFS_IFEXTENTS; ip->i_df.if_bytes = 0; ip->i_df.if_u1.if_root = NULL; @@ -915,11 +916,6 @@ xfs_ialloc( default: ASSERT(0); } - /* - * Attribute fork settings for new inode. - */ - ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; - ip->i_d.di_anextents = 0; /* * Log the new values stuffed into the inode. 
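The xfs_inode.c hunks above and below replace the duplicated fork bookkeeping in the inode core (di_format, di_nextents, di_aformat, di_anextents) with the per-fork fields in struct xfs_ifork, and later call sites in this diff switch to the xfs_ifork_format()/xfs_ifork_nextents() helpers so a missing attribute fork (ip->i_afp == NULL) no longer needs special-casing at each use. A minimal sketch of what such NULL-tolerant helpers look like, assuming they are simple inline wrappers — the names come from the diff, the bodies here are illustrative rather than the exact in-tree definitions:

static inline int8_t xfs_ifork_format(struct xfs_ifork *ifp)
{
	/* Treat an absent fork as the default (empty) extents format. */
	if (!ifp)
		return XFS_DINODE_FMT_EXTENTS;
	return ifp->if_format;
}

static inline xfs_extnum_t xfs_ifork_nextents(struct xfs_ifork *ifp)
{
	/* An absent fork has no extents to count. */
	if (!ifp)
		return 0;
	return ifp->if_nextents;
}

With helpers along these lines, the extent-count asserts and the log-dinode conversion later in this diff can pass ip->i_afp unconditionally (e.g. xfs_ifork_nextents(ip->i_afp)) instead of branching on whether the attribute fork exists.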
@@ -1686,7 +1682,7 @@ xfs_inactive_truncate( if (error) goto error_trans_cancel; - ASSERT(ip->i_d.di_nextents == 0); + ASSERT(ip->i_df.if_nextents == 0); error = xfs_trans_commit(tp); if (error) @@ -1836,7 +1832,7 @@ xfs_inactive( if (S_ISREG(VFS_I(ip)->i_mode) && (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 || - ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0)) + ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0)) truncate = 1; error = xfs_qm_dqattach(ip); @@ -1862,7 +1858,6 @@ xfs_inactive( } ASSERT(!ip->i_afp); - ASSERT(ip->i_d.di_anextents == 0); ASSERT(ip->i_d.di_forkoff == 0); /* @@ -2172,7 +2167,7 @@ xfs_iunlink_update_inode( ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino)); - error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0); + error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0); if (error) return error; @@ -2302,7 +2297,7 @@ xfs_iunlink_map_ino( return error; } - error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0); + error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0); if (error) { xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.", __func__, error); @@ -2602,7 +2597,7 @@ xfs_ifree_cluster( xfs_daddr_t blkno; xfs_buf_t *bp; xfs_inode_t *ip; - xfs_inode_log_item_t *iip; + struct xfs_inode_log_item *iip; struct xfs_log_item *lip; struct xfs_perag *pag; struct xfs_ino_geometry *igeo = M_IGEO(mp); @@ -2662,7 +2657,7 @@ xfs_ifree_cluster( */ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { if (lip->li_type == XFS_LI_INODE) { - iip = (xfs_inode_log_item_t *)lip; + iip = (struct xfs_inode_log_item *)lip; ASSERT(iip->ili_logged == 1); lip->li_cb = xfs_istale_done; xfs_trans_ail_copy_lsn(mp->m_ail, @@ -2712,24 +2707,6 @@ xfs_ifree_cluster( } /* - * Free any local-format buffers sitting around before we reset to - * extents format. - */ -static inline void -xfs_ifree_local_data( - struct xfs_inode *ip, - int whichfork) -{ - struct xfs_ifork *ifp; - - if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) - return; - - ifp = XFS_IFORK_PTR(ip, whichfork); - xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); -} - -/* * This is called to return an inode to the inode free list. * The inode should already be truncated to 0 length and have * no pages associated with it. This routine also assumes that @@ -2749,8 +2726,7 @@ xfs_ifree( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(VFS_I(ip)->i_nlink == 0); - ASSERT(ip->i_d.di_nextents == 0); - ASSERT(ip->i_d.di_anextents == 0); + ASSERT(ip->i_df.if_nextents == 0); ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); ASSERT(ip->i_d.di_nblocks == 0); @@ -2765,16 +2741,23 @@ xfs_ifree( if (error) return error; - xfs_ifree_local_data(ip, XFS_DATA_FORK); - xfs_ifree_local_data(ip, XFS_ATTR_FORK); + /* + * Free any local-format data sitting around before we reset the + * data fork to extents format. Note that the attr fork data has + * already been freed by xfs_attr_inactive. 
+ */ + if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) { + kmem_free(ip->i_df.if_u1.if_data); + ip->i_df.if_u1.if_data = NULL; + ip->i_df.if_bytes = 0; + } VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ ip->i_d.di_flags = 0; ip->i_d.di_flags2 = 0; ip->i_d.di_dmevmask = 0; ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ - ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; - ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; + ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; /* Don't attempt to replay owner changes for a deleted inode */ ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER); @@ -3496,6 +3479,7 @@ xfs_iflush_cluster( struct xfs_inode **cilist; struct xfs_inode *cip; struct xfs_ino_geometry *igeo = M_IGEO(mp); + int error = 0; int nr_found; int clcount = 0; int i; @@ -3588,11 +3572,10 @@ xfs_iflush_cluster( * re-check that it's dirty before flushing. */ if (!xfs_inode_clean(cip)) { - int error; error = xfs_iflush_int(cip, bp); if (error) { xfs_iunlock(cip, XFS_ILOCK_SHARED); - goto cluster_corrupt_out; + goto out_free; } clcount++; } else { @@ -3611,37 +3594,7 @@ out_free: kmem_free(cilist); out_put: xfs_perag_put(pag); - return 0; - - -cluster_corrupt_out: - /* - * Corruption detected in the clustering loop. Invalidate the - * inode buffer and shut down the filesystem. - */ - rcu_read_unlock(); - - /* - * We'll always have an inode attached to the buffer for completion - * process by the time we are called from xfs_iflush(). Hence we have - * always need to do IO completion processing to abort the inodes - * attached to the buffer. handle them just like the shutdown case in - * xfs_buf_submit(). - */ - ASSERT(bp->b_iodone); - bp->b_flags |= XBF_ASYNC; - bp->b_flags &= ~XBF_DONE; - xfs_buf_stale(bp); - xfs_buf_ioerror(bp, -EIO); - xfs_buf_ioend(bp); - - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - - /* abort the corrupt inode, as it was not attached to the buffer */ - xfs_iflush_abort(cip, false); - kmem_free(cilist); - xfs_perag_put(pag); - return -EFSCORRUPTED; + return error; } /* @@ -3667,8 +3620,8 @@ xfs_iflush( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(xfs_isiflocked(ip)); - ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || - ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); + ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || + ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); *bpp = NULL; @@ -3688,42 +3641,20 @@ xfs_iflush( } /* - * This may have been unpinned because the filesystem is shutting - * down forcibly. If that's the case we must not write this inode - * to disk, because the log record didn't make it to disk. - * - * We also have to remove the log item from the AIL in this case, - * as we wait for an empty AIL as part of the unmount process. - */ - if (XFS_FORCED_SHUTDOWN(mp)) { - error = -EIO; - goto abort_out; - } - - /* * Get the buffer containing the on-disk inode. We are doing a try-lock - * operation here, so we may get an EAGAIN error. In that case, we - * simply want to return with the inode still dirty. + * operation here, so we may get an EAGAIN error. In that case, return + * leaving the inode dirty. * * If we get any other error, we effectively have a corruption situation - * and we cannot flush the inode, so we treat it the same as failing - * xfs_iflush_int(). + * and we cannot flush the inode. Abort the flush and shut down. 
*/ - error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK, - 0); + error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK); if (error == -EAGAIN) { xfs_ifunlock(ip); return error; } if (error) - goto corrupt_out; - - /* - * First flush out the inode that xfs_iflush was called with. - */ - error = xfs_iflush_int(ip, bp); - if (error) - goto corrupt_out; + goto abort; /* * If the buffer is pinned then push on the log now so we won't @@ -3733,61 +3664,32 @@ xfs_iflush( xfs_log_force(mp, 0); /* - * inode clustering: try to gather other inodes into this write + * Flush the provided inode then attempt to gather others from the + * cluster into the write. * - * Note: Any error during clustering will result in the filesystem - * being shut down and completion callbacks run on the cluster buffer. - * As we have already flushed and attached this inode to the buffer, - * it has already been aborted and released by xfs_iflush_cluster() and - * so we have no further error handling to do here. + * Note: Once we attempt to flush an inode, we must run buffer + * completion callbacks on any failure. If this fails, simulate an I/O + * failure on the buffer and shut down. */ - error = xfs_iflush_cluster(ip, bp); - if (error) - return error; + error = xfs_iflush_int(ip, bp); + if (!error) + error = xfs_iflush_cluster(ip, bp); + if (error) { + bp->b_flags |= XBF_ASYNC; + xfs_buf_ioend_fail(bp); + goto shutdown; + } *bpp = bp; return 0; -corrupt_out: - if (bp) - xfs_buf_relse(bp); +abort: + xfs_iflush_abort(ip); +shutdown: xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); -abort_out: - /* abort the corrupt inode, as it was not attached to the buffer */ - xfs_iflush_abort(ip, false); return error; } -/* - * If there are inline format data / attr forks attached to this inode, - * make sure they're not corrupt. - */ -bool -xfs_inode_verify_forks( - struct xfs_inode *ip) -{ - struct xfs_ifork *ifp; - xfs_failaddr_t fa; - - fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops); - if (fa) { - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); - xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork", - ifp->if_u1.if_data, ifp->if_bytes, fa); - return false; - } - - fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops); - if (fa) { - ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK); - xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork", - ifp ? ifp->if_u1.if_data : NULL, - ifp ? ifp->if_bytes : 0, fa); - return false; - } - return true; -} - STATIC int xfs_iflush_int( struct xfs_inode *ip, @@ -3796,61 +3698,68 @@ xfs_iflush_int( struct xfs_inode_log_item *iip = ip->i_itemp; struct xfs_dinode *dip; struct xfs_mount *mp = ip->i_mount; + int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(xfs_isiflocked(ip)); - ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || - ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); + ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || + ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); ASSERT(iip != NULL && iip->ili_fields != 0); - /* set *dip = inode's place in the buffer */ dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); + /* + * We don't flush the inode if any of the following checks fail, but we + * do still update the log item and attach to the backing buffer as if + * the flush happened. This is a formality to facilitate predictable + * error handling as the caller will shutdown and fail the buffer. 
+ */ + error = -EFSCORRUPTED; if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), mp, XFS_ERRTAG_IFLUSH_1)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT, __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); - goto corrupt_out; + goto flush_out; } if (S_ISREG(VFS_I(ip)->i_mode)) { if (XFS_TEST_ERROR( - (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && - (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), + ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && + ip->i_df.if_format != XFS_DINODE_FMT_BTREE, mp, XFS_ERRTAG_IFLUSH_3)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: Bad regular inode %Lu, ptr "PTR_FMT, __func__, ip->i_ino, ip); - goto corrupt_out; + goto flush_out; } } else if (S_ISDIR(VFS_I(ip)->i_mode)) { if (XFS_TEST_ERROR( - (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && - (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && - (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), + ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && + ip->i_df.if_format != XFS_DINODE_FMT_BTREE && + ip->i_df.if_format != XFS_DINODE_FMT_LOCAL, mp, XFS_ERRTAG_IFLUSH_4)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: Bad directory inode %Lu, ptr "PTR_FMT, __func__, ip->i_ino, ip); - goto corrupt_out; + goto flush_out; } } - if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > + if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) > ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: detected corrupt incore inode %Lu, " "total extents = %d, nblocks = %Ld, ptr "PTR_FMT, __func__, ip->i_ino, - ip->i_d.di_nextents + ip->i_d.di_anextents, + ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp), ip->i_d.di_nblocks, ip); - goto corrupt_out; + goto flush_out; } if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, mp, XFS_ERRTAG_IFLUSH_6)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT, __func__, ip->i_ino, ip->i_d.di_forkoff, ip); - goto corrupt_out; + goto flush_out; } /* @@ -3865,9 +3774,16 @@ xfs_iflush_int( if (!xfs_sb_version_has_v3inode(&mp->m_sb)) ip->i_d.di_flushiter++; - /* Check the inline fork data before we write out. */ - if (!xfs_inode_verify_forks(ip)) - goto corrupt_out; + /* + * If there are inline format data / attr forks attached to this inode, + * make sure they are not corrupt. + */ + if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL && + xfs_ifork_verify_local_data(ip)) + goto flush_out; + if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL && + xfs_ifork_verify_local_attr(ip)) + goto flush_out; /* * Copy the dirty parts of the inode into the on-disk inode. We always @@ -3910,6 +3826,8 @@ xfs_iflush_int( * need the AIL lock, because it is a 64 bit value that cannot be read * atomically. */ + error = 0; +flush_out: iip->ili_last_fields = iip->ili_fields; iip->ili_fields = 0; iip->ili_fsync_fields = 0; @@ -3919,10 +3837,10 @@ xfs_iflush_int( &iip->ili_item.li_lsn); /* - * Attach the function xfs_iflush_done to the inode's - * buffer. This will remove the inode from the AIL - * and unlock the inode's flush lock when the inode is - * completely written to disk. + * Attach the inode item callback to the buffer whether the flush + * succeeded or not. If not, the caller will shut down and fail I/O + * completion on the buffer to remove the inode from the AIL and release + * the flush lock. 
*/ xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); @@ -3931,10 +3849,7 @@ xfs_iflush_int( ASSERT(!list_empty(&bp->b_li_list)); ASSERT(bp->b_iodone != NULL); - return 0; - -corrupt_out: - return -EFSCORRUPTED; + return error; } /* Release an inode. */ diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index c6a63f6764a6..47d3b391030d 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -57,9 +57,6 @@ typedef struct xfs_inode { struct xfs_icdinode i_d; /* most of ondisk inode */ - xfs_extnum_t i_cnextents; /* # of extents in cow fork */ - unsigned int i_cformat; /* format of cow fork */ - /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ @@ -218,8 +215,7 @@ static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip) #define XFS_IFLOCK (1 << __XFS_IFLOCK_BIT) #define __XFS_IPINNED_BIT 8 /* wakeup key for zero pin count */ #define XFS_IPINNED (1 << __XFS_IPINNED_BIT) -#define XFS_IDONTCACHE (1 << 9) /* don't cache the inode long term */ -#define XFS_IEOFBLOCKS (1 << 10)/* has the preallocblocks tag set */ +#define XFS_IEOFBLOCKS (1 << 9) /* has the preallocblocks tag set */ /* * If this unlinked inode is in the middle of recovery, don't let drop_inode * truncate and free the inode. This can happen if we iget the inode during @@ -467,6 +463,7 @@ int xfs_break_layouts(struct inode *inode, uint *iolock, /* from xfs_iops.c */ extern void xfs_setup_inode(struct xfs_inode *ip); extern void xfs_setup_iops(struct xfs_inode *ip); +extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init); /* * When setting up a newly allocated inode, we need to call @@ -497,8 +494,6 @@ extern struct kmem_zone *xfs_inode_zone; /* The default CoW extent size hint. */ #define XFS_DEFAULT_COWEXTSZ_HINT 32 -bool xfs_inode_verify_forks(struct xfs_inode *ip); - int xfs_iunlink_init(struct xfs_perag *pag); void xfs_iunlink_destroy(struct xfs_perag *pag); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index f779cca2346f..ba47bf65b772 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -36,10 +36,10 @@ xfs_inode_item_data_fork_size( { struct xfs_inode *ip = iip->ili_inode; - switch (ip->i_d.di_format) { + switch (ip->i_df.if_format) { case XFS_DINODE_FMT_EXTENTS: if ((iip->ili_fields & XFS_ILOG_DEXT) && - ip->i_d.di_nextents > 0 && + ip->i_df.if_nextents > 0 && ip->i_df.if_bytes > 0) { /* worst case, doesn't subtract delalloc extents */ *nbytes += XFS_IFORK_DSIZE(ip); @@ -77,10 +77,10 @@ xfs_inode_item_attr_fork_size( { struct xfs_inode *ip = iip->ili_inode; - switch (ip->i_d.di_aformat) { + switch (ip->i_afp->if_format) { case XFS_DINODE_FMT_EXTENTS: if ((iip->ili_fields & XFS_ILOG_AEXT) && - ip->i_d.di_anextents > 0 && + ip->i_afp->if_nextents > 0 && ip->i_afp->if_bytes > 0) { /* worst case, doesn't subtract unused space */ *nbytes += XFS_IFORK_ASIZE(ip); @@ -142,13 +142,13 @@ xfs_inode_item_format_data_fork( struct xfs_inode *ip = iip->ili_inode; size_t data_bytes; - switch (ip->i_d.di_format) { + switch (ip->i_df.if_format) { case XFS_DINODE_FMT_EXTENTS: iip->ili_fields &= ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV); if ((iip->ili_fields & XFS_ILOG_DEXT) && - ip->i_d.di_nextents > 0 && + ip->i_df.if_nextents > 0 && ip->i_df.if_bytes > 0) { struct xfs_bmbt_rec *p; @@ -227,18 +227,18 @@ xfs_inode_item_format_attr_fork( struct xfs_inode *ip = iip->ili_inode; size_t data_bytes; - switch (ip->i_d.di_aformat) { + switch (ip->i_afp->if_format) { case XFS_DINODE_FMT_EXTENTS: iip->ili_fields &= ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT); if 
((iip->ili_fields & XFS_ILOG_AEXT) && - ip->i_d.di_anextents > 0 && + ip->i_afp->if_nextents > 0 && ip->i_afp->if_bytes > 0) { struct xfs_bmbt_rec *p; ASSERT(xfs_iext_count(ip->i_afp) == - ip->i_d.di_anextents); + ip->i_afp->if_nextents); p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT); data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK); @@ -305,7 +305,7 @@ xfs_inode_to_log_dinode( struct inode *inode = VFS_I(ip); to->di_magic = XFS_DINODE_MAGIC; - to->di_format = from->di_format; + to->di_format = xfs_ifork_format(&ip->i_df); to->di_uid = i_uid_read(inode); to->di_gid = i_gid_read(inode); to->di_projid_lo = from->di_projid & 0xffff; @@ -326,10 +326,10 @@ xfs_inode_to_log_dinode( to->di_size = from->di_size; to->di_nblocks = from->di_nblocks; to->di_extsize = from->di_extsize; - to->di_nextents = from->di_nextents; - to->di_anextents = from->di_anextents; + to->di_nextents = xfs_ifork_nextents(&ip->i_df); + to->di_anextents = xfs_ifork_nextents(ip->i_afp); to->di_forkoff = from->di_forkoff; - to->di_aformat = from->di_aformat; + to->di_aformat = xfs_ifork_format(ip->i_afp); to->di_dmevmask = from->di_dmevmask; to->di_dmstate = from->di_dmstate; to->di_flags = from->di_flags; @@ -497,21 +497,6 @@ xfs_inode_item_push( if (xfs_ipincount(ip) > 0) return XFS_ITEM_PINNED; - /* - * The buffer containing this item failed to be written back - * previously. Resubmit the buffer for IO. - */ - if (test_bit(XFS_LI_FAILED, &lip->li_flags)) { - if (!xfs_buf_trylock(bp)) - return XFS_ITEM_LOCKED; - - if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list)) - rval = XFS_ITEM_FLUSHING; - - xfs_buf_unlock(bp); - return rval; - } - if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) return XFS_ITEM_LOCKED; @@ -777,17 +762,12 @@ xfs_iflush_done( */ void xfs_iflush_abort( - xfs_inode_t *ip, - bool stale) + struct xfs_inode *ip) { - xfs_inode_log_item_t *iip = ip->i_itemp; + struct xfs_inode_log_item *iip = ip->i_itemp; if (iip) { - if (test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags)) { - xfs_trans_ail_remove(&iip->ili_item, - stale ? 
SHUTDOWN_LOG_IO_ERROR : - SHUTDOWN_CORRUPT_INCORE); - } + xfs_trans_ail_delete(&iip->ili_item, 0); iip->ili_logged = 0; /* * Clear the ili_last_fields bits now that we know that the @@ -812,7 +792,7 @@ xfs_istale_done( struct xfs_buf *bp, struct xfs_log_item *lip) { - xfs_iflush_abort(INODE_ITEM(lip)->ili_inode, true); + xfs_iflush_abort(INODE_ITEM(lip)->ili_inode); } /* diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 07a60e74c39c..60b34bb66e8e 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -13,7 +13,7 @@ struct xfs_bmbt_rec; struct xfs_inode; struct xfs_mount; -typedef struct xfs_inode_log_item { +struct xfs_inode_log_item { struct xfs_log_item ili_item; /* common portion */ struct xfs_inode *ili_inode; /* inode ptr */ xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ @@ -23,7 +23,7 @@ typedef struct xfs_inode_log_item { unsigned int ili_last_fields; /* fields when flushed */ unsigned int ili_fields; /* fields to be logged */ unsigned int ili_fsync_fields; /* logged since last fsync */ -} xfs_inode_log_item_t; +}; static inline int xfs_inode_clean(xfs_inode_t *ip) { @@ -34,7 +34,7 @@ extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); extern void xfs_inode_item_destroy(struct xfs_inode *); extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *); extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *); -extern void xfs_iflush_abort(struct xfs_inode *, bool); +extern void xfs_iflush_abort(struct xfs_inode *); extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, struct xfs_inode_log_format *); diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c new file mode 100644 index 000000000000..dc3e26ff16c9 --- /dev/null +++ b/fs/xfs/xfs_inode_item_recover.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_trans.h" +#include "xfs_inode_item.h" +#include "xfs_trace.h" +#include "xfs_trans_priv.h" +#include "xfs_buf_item.h" +#include "xfs_log.h" +#include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" +#include "xfs_icache.h" +#include "xfs_bmap_btree.h" + +STATIC void +xlog_recover_inode_ra_pass2( + struct xlog *log, + struct xlog_recover_item *item) +{ + if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { + struct xfs_inode_log_format *ilfp = item->ri_buf[0].i_addr; + + xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len, + &xfs_inode_buf_ra_ops); + } else { + struct xfs_inode_log_format_32 *ilfp = item->ri_buf[0].i_addr; + + xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len, + &xfs_inode_buf_ra_ops); + } +} + +/* + * Inode fork owner changes + * + * If we have been told that we have to reparent the inode fork, it's because an + * extent swap operation on a CRC enabled filesystem has been done and we are + * replaying it. We need to walk the BMBT of the appropriate fork and change the + * owners of it. + * + * The complexity here is that we don't have an inode context to work with, so + * after we've replayed the inode we need to instantiate one. This is where the + * fun begins. + * + * We are in the middle of log recovery, so we can't run transactions. 
That + * means we cannot use cache coherent inode instantiation via xfs_iget(), as + * that will result in the corresponding iput() running the inode through + * xfs_inactive(). If we've just replayed an inode core that changes the link + * count to zero (i.e. it's been unlinked), then xfs_inactive() will run + * transactions (bad!). + * + * So, to avoid this, we instantiate an inode directly from the inode core we've + * just recovered. We have the buffer still locked, and all we really need to + * instantiate is the inode core and the forks being modified. We can do this + * manually, then run the inode btree owner change, and then tear down the + * xfs_inode without having to run any transactions at all. + * + * Also, because we don't have a transaction context available here but need to + * gather all the buffers we modify for writeback so we pass the buffer_list + * instead for the operation to use. + */ + +STATIC int +xfs_recover_inode_owner_change( + struct xfs_mount *mp, + struct xfs_dinode *dip, + struct xfs_inode_log_format *in_f, + struct list_head *buffer_list) +{ + struct xfs_inode *ip; + int error; + + ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)); + + ip = xfs_inode_alloc(mp, in_f->ilf_ino); + if (!ip) + return -ENOMEM; + + /* instantiate the inode */ + ASSERT(dip->di_version >= 3); + + error = xfs_inode_from_disk(ip, dip); + if (error) + goto out_free_ip; + + if (in_f->ilf_fields & XFS_ILOG_DOWNER) { + ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT); + error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, + ip->i_ino, buffer_list); + if (error) + goto out_free_ip; + } + + if (in_f->ilf_fields & XFS_ILOG_AOWNER) { + ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT); + error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK, + ip->i_ino, buffer_list); + if (error) + goto out_free_ip; + } + +out_free_ip: + xfs_inode_free(ip); + return error; +} + +STATIC int +xlog_recover_inode_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t current_lsn) +{ + struct xfs_inode_log_format *in_f; + struct xfs_mount *mp = log->l_mp; + struct xfs_buf *bp; + struct xfs_dinode *dip; + int len; + char *src; + char *dest; + int error; + int attr_index; + uint fields; + struct xfs_log_dinode *ldip; + uint isize; + int need_free = 0; + + if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { + in_f = item->ri_buf[0].i_addr; + } else { + in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0); + need_free = 1; + error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); + if (error) + goto error; + } + + /* + * Inode buffers can be freed, look out for it, + * and do not replay the inode. + */ + if (xlog_is_buffer_cancelled(log, in_f->ilf_blkno, in_f->ilf_len)) { + error = 0; + trace_xfs_log_recover_inode_cancel(log, in_f); + goto error; + } + trace_xfs_log_recover_inode_recover(log, in_f); + + error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, + 0, &bp, &xfs_inode_buf_ops); + if (error) + goto error; + ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); + dip = xfs_buf_offset(bp, in_f->ilf_boffset); + + /* + * Make sure the place we're flushing out to really looks + * like an inode! 
+ */ + if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) { + xfs_alert(mp, + "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld", + __func__, dip, bp, in_f->ilf_ino); + error = -EFSCORRUPTED; + goto out_release; + } + ldip = item->ri_buf[1].i_addr; + if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) { + xfs_alert(mp, + "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld", + __func__, item, in_f->ilf_ino); + error = -EFSCORRUPTED; + goto out_release; + } + + /* + * If the inode has an LSN in it, recover the inode only if it's less + * than the lsn of the transaction we are replaying. Note: we still + * need to replay an owner change even though the inode is more recent + * than the transaction as there is no guarantee that all the btree + * blocks are more recent than this transaction, too. + */ + if (dip->di_version >= 3) { + xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); + + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { + trace_xfs_log_recover_inode_skip(log, in_f); + error = 0; + goto out_owner_change; + } + } + + /* + * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes + * are transactional and if ordering is necessary we can determine that + * more accurately by the LSN field in the V3 inode core. Don't trust + * the inode versions we might be changing them here - use the + * superblock flag to determine whether we need to look at di_flushiter + * to skip replay when the on disk inode is newer than the log one + */ + if (!xfs_sb_version_has_v3inode(&mp->m_sb) && + ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) { + /* + * Deal with the wrap case, DI_MAX_FLUSH is less + * than smaller numbers + */ + if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && + ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) { + /* do nothing */ + } else { + trace_xfs_log_recover_inode_skip(log, in_f); + error = 0; + goto out_release; + } + } + + /* Take the opportunity to reset the flush iteration count */ + ldip->di_flushiter = 0; + + if (unlikely(S_ISREG(ldip->di_mode))) { + if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && + (ldip->di_format != XFS_DINODE_FMT_BTREE)) { + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", + XFS_ERRLEVEL_LOW, mp, ldip, + sizeof(*ldip)); + xfs_alert(mp, + "%s: Bad regular inode log record, rec ptr "PTR_FMT", " + "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld", + __func__, item, dip, bp, in_f->ilf_ino); + error = -EFSCORRUPTED; + goto out_release; + } + } else if (unlikely(S_ISDIR(ldip->di_mode))) { + if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && + (ldip->di_format != XFS_DINODE_FMT_BTREE) && + (ldip->di_format != XFS_DINODE_FMT_LOCAL)) { + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", + XFS_ERRLEVEL_LOW, mp, ldip, + sizeof(*ldip)); + xfs_alert(mp, + "%s: Bad dir inode log record, rec ptr "PTR_FMT", " + "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld", + __func__, item, dip, bp, in_f->ilf_ino); + error = -EFSCORRUPTED; + goto out_release; + } + } + if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){ + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", + XFS_ERRLEVEL_LOW, mp, ldip, + sizeof(*ldip)); + xfs_alert(mp, + "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", " + "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld", + __func__, item, dip, bp, in_f->ilf_ino, + ldip->di_nextents + ldip->di_anextents, + ldip->di_nblocks); + error = -EFSCORRUPTED; + goto out_release; + } + if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) 
{ + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", + XFS_ERRLEVEL_LOW, mp, ldip, + sizeof(*ldip)); + xfs_alert(mp, + "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", " + "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__, + item, dip, bp, in_f->ilf_ino, ldip->di_forkoff); + error = -EFSCORRUPTED; + goto out_release; + } + isize = xfs_log_dinode_size(mp); + if (unlikely(item->ri_buf[1].i_len > isize)) { + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", + XFS_ERRLEVEL_LOW, mp, ldip, + sizeof(*ldip)); + xfs_alert(mp, + "%s: Bad inode log record length %d, rec ptr "PTR_FMT, + __func__, item->ri_buf[1].i_len, item); + error = -EFSCORRUPTED; + goto out_release; + } + + /* recover the log dinode inode into the on disk inode */ + xfs_log_dinode_to_disk(ldip, dip); + + fields = in_f->ilf_fields; + if (fields & XFS_ILOG_DEV) + xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); + + if (in_f->ilf_size == 2) + goto out_owner_change; + len = item->ri_buf[2].i_len; + src = item->ri_buf[2].i_addr; + ASSERT(in_f->ilf_size <= 4); + ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); + ASSERT(!(fields & XFS_ILOG_DFORK) || + (len == in_f->ilf_dsize)); + + switch (fields & XFS_ILOG_DFORK) { + case XFS_ILOG_DDATA: + case XFS_ILOG_DEXT: + memcpy(XFS_DFORK_DPTR(dip), src, len); + break; + + case XFS_ILOG_DBROOT: + xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, + (struct xfs_bmdr_block *)XFS_DFORK_DPTR(dip), + XFS_DFORK_DSIZE(dip, mp)); + break; + + default: + /* + * There are no data fork flags set. + */ + ASSERT((fields & XFS_ILOG_DFORK) == 0); + break; + } + + /* + * If we logged any attribute data, recover it. There may or + * may not have been any other non-core data logged in this + * transaction. + */ + if (in_f->ilf_fields & XFS_ILOG_AFORK) { + if (in_f->ilf_fields & XFS_ILOG_DFORK) { + attr_index = 3; + } else { + attr_index = 2; + } + len = item->ri_buf[attr_index].i_len; + src = item->ri_buf[attr_index].i_addr; + ASSERT(len == in_f->ilf_asize); + + switch (in_f->ilf_fields & XFS_ILOG_AFORK) { + case XFS_ILOG_ADATA: + case XFS_ILOG_AEXT: + dest = XFS_DFORK_APTR(dip); + ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); + memcpy(dest, src, len); + break; + + case XFS_ILOG_ABROOT: + dest = XFS_DFORK_APTR(dip); + xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, + len, (struct xfs_bmdr_block *)dest, + XFS_DFORK_ASIZE(dip, mp)); + break; + + default: + xfs_warn(log->l_mp, "%s: Invalid flag", __func__); + ASSERT(0); + error = -EFSCORRUPTED; + goto out_release; + } + } + +out_owner_change: + /* Recover the swapext owner change unless inode has been deleted */ + if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) && + (dip->di_mode != 0)) + error = xfs_recover_inode_owner_change(mp, dip, in_f, + buffer_list); + /* re-generate the checksum. */ + xfs_dinode_calc_crc(log->l_mp, dip); + + ASSERT(bp->b_mount == mp); + bp->b_iodone = xlog_recover_iodone; + xfs_buf_delwri_queue(bp, buffer_list); + +out_release: + xfs_buf_relse(bp); +error: + if (need_free) + kmem_free(in_f); + return error; +} + +const struct xlog_recover_item_ops xlog_inode_item_ops = { + .item_type = XFS_LI_INODE, + .ra_pass2 = xlog_recover_inode_ra_pass2, + .commit_pass2 = xlog_recover_inode_commit_pass2, +}; diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 309958186d33..a40f88cf3ab7 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1104,26 +1104,17 @@ xfs_fill_fsxattr( bool attr, struct fsxattr *fa) { + struct xfs_ifork *ifp = attr ? 
ip->i_afp : &ip->i_df; + simple_fill_fsxattr(fa, xfs_ip2xflags(ip)); fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; fa->fsx_cowextsize = ip->i_d.di_cowextsize << ip->i_mount->m_sb.sb_blocklog; fa->fsx_projid = ip->i_d.di_projid; - - if (attr) { - if (ip->i_afp) { - if (ip->i_afp->if_flags & XFS_IFEXTENTS) - fa->fsx_nextents = xfs_iext_count(ip->i_afp); - else - fa->fsx_nextents = ip->i_d.di_anextents; - } else - fa->fsx_nextents = 0; - } else { - if (ip->i_df.if_flags & XFS_IFEXTENTS) - fa->fsx_nextents = xfs_iext_count(&ip->i_df); - else - fa->fsx_nextents = ip->i_d.di_nextents; - } + if (ifp && (ifp->if_flags & XFS_IFEXTENTS)) + fa->fsx_nextents = xfs_iext_count(ifp); + else + fa->fsx_nextents = xfs_ifork_nextents(ifp); } STATIC int @@ -1201,37 +1192,6 @@ xfs_flags2diflags2( return di_flags2; } -STATIC void -xfs_diflags_to_linux( - struct xfs_inode *ip) -{ - struct inode *inode = VFS_I(ip); - unsigned int xflags = xfs_ip2xflags(ip); - - if (xflags & FS_XFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; - if (xflags & FS_XFLAG_APPEND) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; - if (xflags & FS_XFLAG_SYNC) - inode->i_flags |= S_SYNC; - else - inode->i_flags &= ~S_SYNC; - if (xflags & FS_XFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - else - inode->i_flags &= ~S_NOATIME; -#if 0 /* disabled until the flag switching races are sorted out */ - if (xflags & FS_XFLAG_DAX) - inode->i_flags |= S_DAX; - else - inode->i_flags &= ~S_DAX; -#endif -} - static int xfs_ioctl_setattr_xflags( struct xfs_trans *tp, @@ -1242,7 +1202,7 @@ xfs_ioctl_setattr_xflags( uint64_t di_flags2; /* Can't change realtime flag if any extents are allocated. */ - if ((ip->i_d.di_nextents || ip->i_delayed_blks) && + if ((ip->i_df.if_nextents || ip->i_delayed_blks) && XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME)) return -EINVAL; @@ -1269,7 +1229,7 @@ xfs_ioctl_setattr_xflags( ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags); ip->i_d.di_flags2 = di_flags2; - xfs_diflags_to_linux(ip); + xfs_diflags_to_iflags(ip, false); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(mp, xs_ig_attrchg); @@ -1420,7 +1380,7 @@ xfs_ioctl_setattr_check_extsize( xfs_extlen_t size; xfs_fsblock_t extsize_fsb; - if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents && + if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents && ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize)) return -EINVAL; @@ -1513,7 +1473,6 @@ xfs_ioctl_setattr( struct fsxattr old_fa; struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; - struct xfs_dquot *udqp = NULL; struct xfs_dquot *pdqp = NULL; struct xfs_dquot *olddquot = NULL; int code; @@ -1536,7 +1495,7 @@ xfs_ioctl_setattr( if (XFS_IS_QUOTA_ON(mp)) { code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid, VFS_I(ip)->i_gid, fa->fsx_projid, - XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp); + XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp); if (code) return code; } @@ -1560,7 +1519,7 @@ xfs_ioctl_setattr( if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) && ip->i_d.di_projid != fa->fsx_projid) { - code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp, + code = xfs_qm_vop_chown_reserve(tp, ip, NULL, NULL, pdqp, capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0); if (code) /* out of quota */ goto error_trans_cancel; @@ -1626,7 +1585,6 @@ xfs_ioctl_setattr( * Release any dquot(s) the inode had kept before chown. 
*/ xfs_qm_dqrele(olddquot); - xfs_qm_dqrele(udqp); xfs_qm_dqrele(pdqp); return code; @@ -1634,7 +1592,6 @@ xfs_ioctl_setattr( error_trans_cancel: xfs_trans_cancel(tp); error_free_dquots: - xfs_qm_dqrele(udqp); xfs_qm_dqrele(pdqp); return code; } @@ -2082,6 +2039,41 @@ out: return error; } +static inline int +xfs_fs_eofblocks_from_user( + struct xfs_fs_eofblocks *src, + struct xfs_eofblocks *dst) +{ + if (src->eof_version != XFS_EOFBLOCKS_VERSION) + return -EINVAL; + + if (src->eof_flags & ~XFS_EOF_FLAGS_VALID) + return -EINVAL; + + if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) || + memchr_inv(src->pad64, 0, sizeof(src->pad64))) + return -EINVAL; + + dst->eof_flags = src->eof_flags; + dst->eof_prid = src->eof_prid; + dst->eof_min_file_size = src->eof_min_file_size; + + dst->eof_uid = INVALID_UID; + if (src->eof_flags & XFS_EOF_FLAGS_UID) { + dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid); + if (!uid_valid(dst->eof_uid)) + return -EINVAL; + } + + dst->eof_gid = INVALID_GID; + if (src->eof_flags & XFS_EOF_FLAGS_GID) { + dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid); + if (!gid_valid(dst->eof_gid)) + return -EINVAL; + } + return 0; +} + /* * Note: some of the ioctl's return positive numbers as a * byte count indicating success, such as readlink_by_handle. diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index bb590a267a7f..b9a8c3798e08 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -352,22 +352,10 @@ xfs_quota_calc_throttle( } /* - * If we are doing a write at the end of the file and there are no allocations - * past this one, then extend the allocation out to the file system's write - * iosize. - * * If we don't have a user specified preallocation size, dynamically increase * the preallocation size as the size of the file grows. Cap the maximum size * at a single extent or less if the filesystem is near full. The closer the - * filesystem is to full, the smaller the maximum prealocation. - * - * As an exception we don't do any preallocation at all if the file is smaller - * than the minimum preallocation and we are using the default dynamic - * preallocation scheme, as it is likely this is the only write to the file that - * is going to be done. - * - * We clean up any extra space left over when the file is closed in - * xfs_inactive(). + * filesystem is to being full, the smaller the maximum preallocation. */ STATIC xfs_fsblock_t xfs_iomap_prealloc_size( @@ -377,63 +365,70 @@ xfs_iomap_prealloc_size( loff_t count, struct xfs_iext_cursor *icur) { + struct xfs_iext_cursor ncur = *icur; + struct xfs_bmbt_irec prev, got; struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); - struct xfs_bmbt_irec prev; - int shift = 0; int64_t freesp; xfs_fsblock_t qblocks; - int qshift = 0; xfs_fsblock_t alloc_blocks = 0; + xfs_extlen_t plen; + int shift = 0; + int qshift = 0; - if (offset + count <= XFS_ISIZE(ip)) - return 0; - - if (!(mp->m_flags & XFS_MOUNT_ALLOCSIZE) && - (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))) + /* + * As an exception we don't do any preallocation at all if the file is + * smaller than the minimum preallocation and we are using the default + * dynamic preallocation scheme, as it is likely this is the only write + * to the file that is going to be done. 
+ */ + if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)) return 0; /* - * If an explicit allocsize is set, the file is small, or we - * are writing behind a hole, then use the minimum prealloc: + * Use the minimum preallocation size for small files or if we are + * writing right after a hole. */ - if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) || - XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || - !xfs_iext_peek_prev_extent(ifp, icur, &prev) || + if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || + !xfs_iext_prev_extent(ifp, &ncur, &prev) || prev.br_startoff + prev.br_blockcount < offset_fsb) return mp->m_allocsize_blocks; /* - * Determine the initial size of the preallocation. We are beyond the - * current EOF here, but we need to take into account whether this is - * a sparse write or an extending write when determining the - * preallocation size. Hence we need to look up the extent that ends - * at the current write offset and use the result to determine the - * preallocation size. - * - * If the extent is a hole, then preallocation is essentially disabled. - * Otherwise we take the size of the preceding data extent as the basis - * for the preallocation size. If the size of the extent is greater than - * half the maximum extent length, then use the current offset as the - * basis. This ensures that for large files the preallocation size - * always extends to MAXEXTLEN rather than falling short due to things - * like stripe unit/width alignment of real extents. + * Take the size of the preceding data extents as the basis for the + * preallocation size. Note that we don't care if the previous extents + * are written or not. */ - if (prev.br_blockcount <= (MAXEXTLEN >> 1)) - alloc_blocks = prev.br_blockcount << 1; - else + plen = prev.br_blockcount; + while (xfs_iext_prev_extent(ifp, &ncur, &got)) { + if (plen > MAXEXTLEN / 2 || + isnullstartblock(got.br_startblock) || + got.br_startoff + got.br_blockcount != prev.br_startoff || + got.br_startblock + got.br_blockcount != prev.br_startblock) + break; + plen += got.br_blockcount; + prev = got; + } + + /* + * If the size of the extents is greater than half the maximum extent + * length, then use the current offset as the basis. This ensures that + * for large files the preallocation size always extends to MAXEXTLEN + * rather than falling short due to things like stripe unit/width + * alignment of real extents. + */ + alloc_blocks = plen * 2; + if (alloc_blocks > MAXEXTLEN) alloc_blocks = XFS_B_TO_FSB(mp, offset); - if (!alloc_blocks) - goto check_writeio; qblocks = alloc_blocks; /* * MAXEXTLEN is not a power of two value but we round the prealloc down * to the nearest power of two value after throttling. To prevent the - * round down from unconditionally reducing the maximum supported prealloc - * size, we round up first, apply appropriate throttling, round down and - * cap the value to MAXEXTLEN. + * round down from unconditionally reducing the maximum supported + * prealloc size, we round up first, apply appropriate throttling, + * round down and cap the value to MAXEXTLEN. 
*/ alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN), alloc_blocks); @@ -494,7 +489,6 @@ xfs_iomap_prealloc_size( */ while (alloc_blocks && alloc_blocks >= freesp) alloc_blocks >>= 4; -check_writeio: if (alloc_blocks < mp->m_allocsize_blocks) alloc_blocks = mp->m_allocsize_blocks; trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift, @@ -563,7 +557,7 @@ xfs_iomap_write_unwritten( xfs_trans_ijoin(tp, ip, 0); error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, - XFS_QMOPT_RES_REGBLKS); + XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES); if (error) goto error_on_bmapi_transaction; @@ -856,7 +850,7 @@ xfs_buffered_write_iomap_begin( xfs_ilock(ip, XFS_ILOCK_EXCL); - if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, XFS_DATA_FORK)) || + if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { error = -EFSCORRUPTED; goto out_unlock; @@ -961,9 +955,16 @@ xfs_buffered_write_iomap_begin( if (error) goto out_unlock; - if (eof) { - prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset, - count, &icur); + if (eof && offset + count > XFS_ISIZE(ip)) { + /* + * Determine the initial size of the preallocation. + * We clean up any extra preallocation when the file is closed. + */ + if (mp->m_flags & XFS_MOUNT_ALLOCSIZE) + prealloc_blocks = mp->m_allocsize_blocks; + else + prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, + offset, count, &icur); if (prealloc_blocks) { xfs_extlen_t align; xfs_off_t end_offset; @@ -1258,12 +1259,12 @@ xfs_xattr_iomap_begin( lockmode = xfs_ilock_attr_map_shared(ip); /* if there are no attribute fork or extents, return ENOENT */ - if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) { + if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) { error = -ENOENT; goto out_unlock; } - ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); + ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL); error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, &nimaps, XFS_BMAPI_ATTRFORK); out_unlock: diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index f7a99b3bbcf7..202b2c0a9e9d 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -25,6 +25,7 @@ #include <linux/posix_acl.h> #include <linux/security.h> #include <linux/iversion.h> +#include <linux/fiemap.h> /* * Directories have different lock order w.r.t. mmap_sem compared to regular @@ -738,12 +739,7 @@ xfs_setattr_nonsize( if (error) /* out of quota */ goto out_cancel; } - } - /* - * Change file ownership. Must be the owner or privileged. - */ - if (mask & (ATTR_UID|ATTR_GID)) { /* * CAP_FSETID overrides the following restrictions: * @@ -877,7 +873,7 @@ xfs_setattr_size( /* * Short circuit the truncate case for zero length files. */ - if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { + if (newsize == 0 && oldsize == 0 && ip->i_df.if_nextents == 0) { if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME))) return 0; @@ -1243,13 +1239,12 @@ xfs_inode_supports_dax( { struct xfs_mount *mp = ip->i_mount; - /* Only supported on non-reflinked files. */ - if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip)) + /* Only supported on regular files. */ + if (!S_ISREG(VFS_I(ip)->i_mode)) return false; - /* DAX mount option or DAX iflag must be set. */ - if (!(mp->m_flags & XFS_MOUNT_DAX) && - !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) + /* Only supported on non-reflinked files. 
*/ + if (xfs_is_reflink_inode(ip)) return false; /* Block size must match page size */ @@ -1260,26 +1255,51 @@ xfs_inode_supports_dax( return xfs_inode_buftarg(ip)->bt_daxdev != NULL; } -STATIC void +static bool +xfs_inode_should_enable_dax( + struct xfs_inode *ip) +{ + if (!IS_ENABLED(CONFIG_FS_DAX)) + return false; + if (ip->i_mount->m_flags & XFS_MOUNT_DAX_NEVER) + return false; + if (!xfs_inode_supports_dax(ip)) + return false; + if (ip->i_mount->m_flags & XFS_MOUNT_DAX_ALWAYS) + return true; + if (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX) + return true; + return false; +} + +void xfs_diflags_to_iflags( - struct inode *inode, - struct xfs_inode *ip) + struct xfs_inode *ip, + bool init) { - uint16_t flags = ip->i_d.di_flags; - - inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | - S_NOATIME | S_DAX); - - if (flags & XFS_DIFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - if (flags & XFS_DIFLAG_APPEND) - inode->i_flags |= S_APPEND; - if (flags & XFS_DIFLAG_SYNC) - inode->i_flags |= S_SYNC; - if (flags & XFS_DIFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - if (xfs_inode_supports_dax(ip)) - inode->i_flags |= S_DAX; + struct inode *inode = VFS_I(ip); + unsigned int xflags = xfs_ip2xflags(ip); + unsigned int flags = 0; + + ASSERT(!(IS_DAX(inode) && init)); + + if (xflags & FS_XFLAG_IMMUTABLE) + flags |= S_IMMUTABLE; + if (xflags & FS_XFLAG_APPEND) + flags |= S_APPEND; + if (xflags & FS_XFLAG_SYNC) + flags |= S_SYNC; + if (xflags & FS_XFLAG_NOATIME) + flags |= S_NOATIME; + if (init && xfs_inode_should_enable_dax(ip)) + flags |= S_DAX; + + /* + * S_DAX can only be set during inode initialization and is never set by + * the VFS, so we cannot mask off S_DAX in i_flags. + */ + inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | S_NOATIME); + inode->i_flags |= flags; } /* @@ -1305,7 +1325,7 @@ xfs_setup_inode( inode_fake_hash(inode); i_size_write(inode, ip->i_d.di_size); - xfs_diflags_to_iflags(inode, ip); + xfs_diflags_to_iflags(ip, true); if (S_ISDIR(inode->i_mode)) { /* diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index ff2da28fed90..16ca97a7ff00 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -104,9 +104,9 @@ xfs_bulkstat_one_int( buf->bs_xflags = xfs_ip2xflags(ip); buf->bs_extsize_blks = dic->di_extsize; - buf->bs_extents = dic->di_nextents; + buf->bs_extents = xfs_ifork_nextents(&ip->i_df); xfs_bulkstat_health(ip, buf); - buf->bs_aextents = dic->di_anextents; + buf->bs_aextents = xfs_ifork_nextents(ip->i_afp); buf->bs_forkoff = XFS_IFORK_BOFF(ip); buf->bs_version = XFS_BULKSTAT_VERSION_V5; @@ -115,7 +115,7 @@ xfs_bulkstat_one_int( buf->bs_cowextsize_blks = dic->di_cowextsize; } - switch (dic->di_format) { + switch (ip->i_df.if_format) { case XFS_DINODE_FMT_DEV: buf->bs_rdev = sysv_encode_dev(inode->i_rdev); buf->bs_blksize = BLKDEV_IOSIZE; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 11c3502b07b1..ec015df55b77 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -18,21 +18,13 @@ #include "xfs_log.h" #include "xfs_log_priv.h" #include "xfs_log_recover.h" -#include "xfs_inode_item.h" -#include "xfs_extfree_item.h" #include "xfs_trans_priv.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" -#include "xfs_quota.h" #include "xfs_trace.h" #include "xfs_icache.h" -#include "xfs_bmap_btree.h" #include "xfs_error.h" -#include "xfs_dir2.h" -#include "xfs_rmap_item.h" #include "xfs_buf_item.h" -#include "xfs_refcount_item.h" -#include "xfs_bmap_item.h" #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) @@ -56,17 +48,6 @@ 
xlog_do_recovery_pass( struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *); /* - * This structure is used during recovery to record the buf log items which - * have been canceled and should not be replayed. - */ -struct xfs_buf_cancel { - xfs_daddr_t bc_blkno; - uint bc_len; - int bc_refcount; - struct list_head bc_list; -}; - -/* * Sector aligned buffer routines for buffer create/read/write/access */ @@ -284,7 +265,7 @@ xlog_header_check_mount( return 0; } -STATIC void +void xlog_recover_iodone( struct xfs_buf *bp) { @@ -1779,12 +1760,72 @@ xlog_clear_stale_blocks( return 0; } +/* + * Release the recovered intent item in the AIL that matches the given intent + * type and intent id. + */ +void +xlog_recover_release_intent( + struct xlog *log, + unsigned short intent_type, + uint64_t intent_id) +{ + struct xfs_ail_cursor cur; + struct xfs_log_item *lip; + struct xfs_ail *ailp = log->l_ailp; + + spin_lock(&ailp->ail_lock); + for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL; + lip = xfs_trans_ail_cursor_next(ailp, &cur)) { + if (lip->li_type != intent_type) + continue; + if (!lip->li_ops->iop_match(lip, intent_id)) + continue; + + spin_unlock(&ailp->ail_lock); + lip->li_ops->iop_release(lip); + spin_lock(&ailp->ail_lock); + break; + } + + xfs_trans_ail_cursor_done(&cur); + spin_unlock(&ailp->ail_lock); +} + /****************************************************************************** * * Log recover routines * ****************************************************************************** */ +static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = { + &xlog_buf_item_ops, + &xlog_inode_item_ops, + &xlog_dquot_item_ops, + &xlog_quotaoff_item_ops, + &xlog_icreate_item_ops, + &xlog_efi_item_ops, + &xlog_efd_item_ops, + &xlog_rui_item_ops, + &xlog_rud_item_ops, + &xlog_cui_item_ops, + &xlog_cud_item_ops, + &xlog_bui_item_ops, + &xlog_bud_item_ops, +}; + +static const struct xlog_recover_item_ops * +xlog_find_item_ops( + struct xlog_recover_item *item) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++) + if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type) + return xlog_recover_item_ops[i]; + + return NULL; +} /* * Sort the log items in the transaction. 
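The dispatch table above replaces the long per-type switches that used to live in this file: each log item type supplies a struct xlog_recover_item_ops, generic code binds it once via xlog_find_item_ops(), and then calls the optional reorder/ra_pass2/commit_pass2 hooks (the reorder hook is consumed in the next hunk). As one example of how the new xlog_recover_release_intent() helper is meant to be used by a "done" item, here is a hedged sketch of an EFD handler releasing its matching EFI; the real EFD recovery code is not part of this section, so treat the body as illustrative of the pattern rather than the verbatim implementation:

STATIC int
xlog_recover_efd_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_efd_log_format	*efd_formatp = item->ri_buf[0].i_addr;

	/*
	 * An EFD in the log means the corresponding EFI completed before the
	 * crash, so drop the recovered intent from the AIL rather than
	 * replaying it.
	 */
	xlog_recover_release_intent(log, XFS_LI_EFI, efd_formatp->efd_efi_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_efd_item_ops = {
	.item_type	= XFS_LI_EFD,
	.commit_pass2	= xlog_recover_efd_commit_pass2,
};

The same shape applies to the other intent/done pairs listed in the ops table (RUI/RUD, CUI/CUD, BUI/BUD): the intent side recreates and queues the deferred work, while the done side simply releases the recovered intent.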
@@ -1841,54 +1882,23 @@ xlog_recover_reorder_trans( struct xlog_recover *trans, int pass) { - xlog_recover_item_t *item, *n; + struct xlog_recover_item *item, *n; int error = 0; LIST_HEAD(sort_list); LIST_HEAD(cancel_list); LIST_HEAD(buffer_list); LIST_HEAD(inode_buffer_list); - LIST_HEAD(inode_list); + LIST_HEAD(item_list); list_splice_init(&trans->r_itemq, &sort_list); list_for_each_entry_safe(item, n, &sort_list, ri_list) { - xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; + enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST; - switch (ITEM_TYPE(item)) { - case XFS_LI_ICREATE: - list_move_tail(&item->ri_list, &buffer_list); - break; - case XFS_LI_BUF: - if (buf_f->blf_flags & XFS_BLF_CANCEL) { - trace_xfs_log_recover_item_reorder_head(log, - trans, item, pass); - list_move(&item->ri_list, &cancel_list); - break; - } - if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { - list_move(&item->ri_list, &inode_buffer_list); - break; - } - list_move_tail(&item->ri_list, &buffer_list); - break; - case XFS_LI_INODE: - case XFS_LI_DQUOT: - case XFS_LI_QUOTAOFF: - case XFS_LI_EFD: - case XFS_LI_EFI: - case XFS_LI_RUI: - case XFS_LI_RUD: - case XFS_LI_CUI: - case XFS_LI_CUD: - case XFS_LI_BUI: - case XFS_LI_BUD: - trace_xfs_log_recover_item_reorder_tail(log, - trans, item, pass); - list_move_tail(&item->ri_list, &inode_list); - break; - default: + item->ri_ops = xlog_find_item_ops(item); + if (!item->ri_ops) { xfs_warn(log->l_mp, - "%s: unrecognized type of log operation", - __func__); + "%s: unrecognized type of log operation (%d)", + __func__, ITEM_TYPE(item)); ASSERT(0); /* * return the remaining items back to the transaction @@ -1896,16 +1906,38 @@ xlog_recover_reorder_trans( */ if (!list_empty(&sort_list)) list_splice_init(&sort_list, &trans->r_itemq); - error = -EIO; - goto out; + error = -EFSCORRUPTED; + break; + } + + if (item->ri_ops->reorder) + fate = item->ri_ops->reorder(item); + + switch (fate) { + case XLOG_REORDER_BUFFER_LIST: + list_move_tail(&item->ri_list, &buffer_list); + break; + case XLOG_REORDER_CANCEL_LIST: + trace_xfs_log_recover_item_reorder_head(log, + trans, item, pass); + list_move(&item->ri_list, &cancel_list); + break; + case XLOG_REORDER_INODE_BUFFER_LIST: + list_move(&item->ri_list, &inode_buffer_list); + break; + case XLOG_REORDER_ITEM_LIST: + trace_xfs_log_recover_item_reorder_tail(log, + trans, item, pass); + list_move_tail(&item->ri_list, &item_list); + break; } } -out: + ASSERT(list_empty(&sort_list)); if (!list_empty(&buffer_list)) list_splice(&buffer_list, &trans->r_itemq); - if (!list_empty(&inode_list)) - list_splice_tail(&inode_list, &trans->r_itemq); + if (!list_empty(&item_list)) + list_splice_tail(&item_list, &trans->r_itemq); if (!list_empty(&inode_buffer_list)) list_splice_tail(&inode_buffer_list, &trans->r_itemq); if (!list_empty(&cancel_list)) @@ -1913,2152 +1945,15 @@ out: return error; } -/* - * Build up the table of buf cancel records so that we don't replay - * cancelled data in the second pass. For buffer records that are - * not cancel records, there is nothing to do here so we just return. - * - * If we get a cancel record which is already in the table, this indicates - * that the buffer was cancelled multiple times. In order to ensure - * that during pass 2 we keep the record in the table until we reach its - * last occurrence in the log, we keep a reference count in the cancel - * record in the table to tell us how many times we expect to see this - * record during the second pass. 
- */ -STATIC int -xlog_recover_buffer_pass1( - struct xlog *log, - struct xlog_recover_item *item) -{ - xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; - struct list_head *bucket; - struct xfs_buf_cancel *bcp; - - if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) { - xfs_err(log->l_mp, "bad buffer log item size (%d)", - item->ri_buf[0].i_len); - return -EFSCORRUPTED; - } - - /* - * If this isn't a cancel buffer item, then just return. - */ - if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { - trace_xfs_log_recover_buf_not_cancel(log, buf_f); - return 0; - } - - /* - * Insert an xfs_buf_cancel record into the hash table of them. - * If there is already an identical record, bump its reference count. - */ - bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno); - list_for_each_entry(bcp, bucket, bc_list) { - if (bcp->bc_blkno == buf_f->blf_blkno && - bcp->bc_len == buf_f->blf_len) { - bcp->bc_refcount++; - trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); - return 0; - } - } - - bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0); - bcp->bc_blkno = buf_f->blf_blkno; - bcp->bc_len = buf_f->blf_len; - bcp->bc_refcount = 1; - list_add_tail(&bcp->bc_list, bucket); - - trace_xfs_log_recover_buf_cancel_add(log, buf_f); - return 0; -} - -/* - * Check to see whether the buffer being recovered has a corresponding - * entry in the buffer cancel record table. If it is, return the cancel - * buffer structure to the caller. - */ -STATIC struct xfs_buf_cancel * -xlog_peek_buffer_cancelled( - struct xlog *log, - xfs_daddr_t blkno, - uint len, - unsigned short flags) -{ - struct list_head *bucket; - struct xfs_buf_cancel *bcp; - - if (!log->l_buf_cancel_table) { - /* empty table means no cancelled buffers in the log */ - ASSERT(!(flags & XFS_BLF_CANCEL)); - return NULL; - } - - bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); - list_for_each_entry(bcp, bucket, bc_list) { - if (bcp->bc_blkno == blkno && bcp->bc_len == len) - return bcp; - } - - /* - * We didn't find a corresponding entry in the table, so return 0 so - * that the buffer is NOT cancelled. - */ - ASSERT(!(flags & XFS_BLF_CANCEL)); - return NULL; -} - -/* - * If the buffer is being cancelled then return 1 so that it will be cancelled, - * otherwise return 0. If the buffer is actually a buffer cancel item - * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the - * table and remove it from the table if this is the last reference. - * - * We remove the cancel record from the table when we encounter its last - * occurrence in the log so that if the same buffer is re-used again after its - * last cancellation we actually replay the changes made at that point. - */ -STATIC int -xlog_check_buffer_cancelled( +void +xlog_buf_readahead( struct xlog *log, xfs_daddr_t blkno, uint len, - unsigned short flags) -{ - struct xfs_buf_cancel *bcp; - - bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags); - if (!bcp) - return 0; - - /* - * We've go a match, so return 1 so that the recovery of this buffer - * is cancelled. If this buffer is actually a buffer cancel log - * item, then decrement the refcount on the one in the table and - * remove it if this is the last reference. - */ - if (flags & XFS_BLF_CANCEL) { - if (--bcp->bc_refcount == 0) { - list_del(&bcp->bc_list); - kmem_free(bcp); - } - } - return 1; -} - -/* - * Perform recovery for a buffer full of inodes. In these buffers, the only - * data which should be recovered is that which corresponds to the - * di_next_unlinked pointers in the on disk inode structures. 
The rest of the - * data for the inodes is always logged through the inodes themselves rather - * than the inode buffer and is recovered in xlog_recover_inode_pass2(). - * - * The only time when buffers full of inodes are fully recovered is when the - * buffer is full of newly allocated inodes. In this case the buffer will - * not be marked as an inode buffer and so will be sent to - * xlog_recover_do_reg_buffer() below during recovery. - */ -STATIC int -xlog_recover_do_inode_buffer( - struct xfs_mount *mp, - xlog_recover_item_t *item, - struct xfs_buf *bp, - xfs_buf_log_format_t *buf_f) -{ - int i; - int item_index = 0; - int bit = 0; - int nbits = 0; - int reg_buf_offset = 0; - int reg_buf_bytes = 0; - int next_unlinked_offset; - int inodes_per_buf; - xfs_agino_t *logged_nextp; - xfs_agino_t *buffer_nextp; - - trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); - - /* - * Post recovery validation only works properly on CRC enabled - * filesystems. - */ - if (xfs_sb_version_hascrc(&mp->m_sb)) - bp->b_ops = &xfs_inode_buf_ops; - - inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog; - for (i = 0; i < inodes_per_buf; i++) { - next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + - offsetof(xfs_dinode_t, di_next_unlinked); - - while (next_unlinked_offset >= - (reg_buf_offset + reg_buf_bytes)) { - /* - * The next di_next_unlinked field is beyond - * the current logged region. Find the next - * logged region that contains or is beyond - * the current di_next_unlinked field. - */ - bit += nbits; - bit = xfs_next_bit(buf_f->blf_data_map, - buf_f->blf_map_size, bit); - - /* - * If there are no more logged regions in the - * buffer, then we're done. - */ - if (bit == -1) - return 0; - - nbits = xfs_contig_bits(buf_f->blf_data_map, - buf_f->blf_map_size, bit); - ASSERT(nbits > 0); - reg_buf_offset = bit << XFS_BLF_SHIFT; - reg_buf_bytes = nbits << XFS_BLF_SHIFT; - item_index++; - } - - /* - * If the current logged region starts after the current - * di_next_unlinked field, then move on to the next - * di_next_unlinked field. - */ - if (next_unlinked_offset < reg_buf_offset) - continue; - - ASSERT(item->ri_buf[item_index].i_addr != NULL); - ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); - ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length)); - - /* - * The current logged region contains a copy of the - * current di_next_unlinked field. Extract its value - * and copy it to the buffer copy. - */ - logged_nextp = item->ri_buf[item_index].i_addr + - next_unlinked_offset - reg_buf_offset; - if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) { - xfs_alert(mp, - "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). " - "Trying to replay bad (0) inode di_next_unlinked field.", - item, bp); - return -EFSCORRUPTED; - } - - buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset); - *buffer_nextp = *logged_nextp; - - /* - * If necessary, recalculate the CRC in the on-disk inode. We - * have to leave the inode in a consistent state for whoever - * reads it next.... - */ - xfs_dinode_calc_crc(mp, - xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); - - } - - return 0; -} - -/* - * V5 filesystems know the age of the buffer on disk being recovered. We can - * have newer objects on disk than we are replaying, and so for these cases we - * don't want to replay the current change as that will make the buffer contents - * temporarily invalid on disk. - * - * The magic number might not match the buffer type we are going to recover - * (e.g. 
reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence - * extract the LSN of the existing object in the buffer based on it's current - * magic number. If we don't recognise the magic number in the buffer, then - * return a LSN of -1 so that the caller knows it was an unrecognised block and - * so can recover the buffer. - * - * Note: we cannot rely solely on magic number matches to determine that the - * buffer has a valid LSN - we also need to verify that it belongs to this - * filesystem, so we need to extract the object's LSN and compare it to that - * which we read from the superblock. If the UUIDs don't match, then we've got a - * stale metadata block from an old filesystem instance that we need to recover - * over the top of. - */ -static xfs_lsn_t -xlog_recover_get_buf_lsn( - struct xfs_mount *mp, - struct xfs_buf *bp) -{ - uint32_t magic32; - uint16_t magic16; - uint16_t magicda; - void *blk = bp->b_addr; - uuid_t *uuid; - xfs_lsn_t lsn = -1; - - /* v4 filesystems always recover immediately */ - if (!xfs_sb_version_hascrc(&mp->m_sb)) - goto recover_immediately; - - magic32 = be32_to_cpu(*(__be32 *)blk); - switch (magic32) { - case XFS_ABTB_CRC_MAGIC: - case XFS_ABTC_CRC_MAGIC: - case XFS_ABTB_MAGIC: - case XFS_ABTC_MAGIC: - case XFS_RMAP_CRC_MAGIC: - case XFS_REFC_CRC_MAGIC: - case XFS_IBT_CRC_MAGIC: - case XFS_IBT_MAGIC: { - struct xfs_btree_block *btb = blk; - - lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); - uuid = &btb->bb_u.s.bb_uuid; - break; - } - case XFS_BMAP_CRC_MAGIC: - case XFS_BMAP_MAGIC: { - struct xfs_btree_block *btb = blk; - - lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); - uuid = &btb->bb_u.l.bb_uuid; - break; - } - case XFS_AGF_MAGIC: - lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); - uuid = &((struct xfs_agf *)blk)->agf_uuid; - break; - case XFS_AGFL_MAGIC: - lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); - uuid = &((struct xfs_agfl *)blk)->agfl_uuid; - break; - case XFS_AGI_MAGIC: - lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); - uuid = &((struct xfs_agi *)blk)->agi_uuid; - break; - case XFS_SYMLINK_MAGIC: - lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); - uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid; - break; - case XFS_DIR3_BLOCK_MAGIC: - case XFS_DIR3_DATA_MAGIC: - case XFS_DIR3_FREE_MAGIC: - lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); - uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; - break; - case XFS_ATTR3_RMT_MAGIC: - /* - * Remote attr blocks are written synchronously, rather than - * being logged. That means they do not contain a valid LSN - * (i.e. transactionally ordered) in them, and hence any time we - * see a buffer to replay over the top of a remote attribute - * block we should simply do so. - */ - goto recover_immediately; - case XFS_SB_MAGIC: - /* - * superblock uuids are magic. We may or may not have a - * sb_meta_uuid on disk, but it will be set in the in-core - * superblock. We set the uuid pointer for verification - * according to the superblock feature mask to ensure we check - * the relevant UUID in the superblock. 
- */ - lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); - if (xfs_sb_version_hasmetauuid(&mp->m_sb)) - uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid; - else - uuid = &((struct xfs_dsb *)blk)->sb_uuid; - break; - default: - break; - } - - if (lsn != (xfs_lsn_t)-1) { - if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid)) - goto recover_immediately; - return lsn; - } - - magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); - switch (magicda) { - case XFS_DIR3_LEAF1_MAGIC: - case XFS_DIR3_LEAFN_MAGIC: - case XFS_DA3_NODE_MAGIC: - lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); - uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; - break; - default: - break; - } - - if (lsn != (xfs_lsn_t)-1) { - if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) - goto recover_immediately; - return lsn; - } - - /* - * We do individual object checks on dquot and inode buffers as they - * have their own individual LSN records. Also, we could have a stale - * buffer here, so we have to at least recognise these buffer types. - * - * A noted complexity here is inode unlinked list processing - it logs - * the inode directly in the buffer, but we don't know which inodes have - * been modified, and there is no global buffer LSN. Hence we need to - * recover all inode buffer types immediately. This problem will be - * fixed by logical logging of the unlinked list modifications. - */ - magic16 = be16_to_cpu(*(__be16 *)blk); - switch (magic16) { - case XFS_DQUOT_MAGIC: - case XFS_DINODE_MAGIC: - goto recover_immediately; - default: - break; - } - - /* unknown buffer contents, recover immediately */ - -recover_immediately: - return (xfs_lsn_t)-1; - -} - -/* - * Validate the recovered buffer is of the correct type and attach the - * appropriate buffer operations to them for writeback. Magic numbers are in a - * few places: - * the first 16 bits of the buffer (inode buffer, dquot buffer), - * the first 32 bits of the buffer (most blocks), - * inside a struct xfs_da_blkinfo at the start of the buffer. - */ -static void -xlog_recover_validate_buf_type( - struct xfs_mount *mp, - struct xfs_buf *bp, - xfs_buf_log_format_t *buf_f, - xfs_lsn_t current_lsn) -{ - struct xfs_da_blkinfo *info = bp->b_addr; - uint32_t magic32; - uint16_t magic16; - uint16_t magicda; - char *warnmsg = NULL; - - /* - * We can only do post recovery validation on items on CRC enabled - * filesystems as we need to know when the buffer was written to be able - * to determine if we should have replayed the item. If we replay old - * metadata over a newer buffer, then it will enter a temporarily - * inconsistent state resulting in verification failures.
Hence for now - * just avoid the verification stage for non-crc filesystems - */ - if (!xfs_sb_version_hascrc(&mp->m_sb)) - return; - - magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); - magic16 = be16_to_cpu(*(__be16*)bp->b_addr); - magicda = be16_to_cpu(info->magic); - switch (xfs_blft_from_flags(buf_f)) { - case XFS_BLFT_BTREE_BUF: - switch (magic32) { - case XFS_ABTB_CRC_MAGIC: - case XFS_ABTB_MAGIC: - bp->b_ops = &xfs_bnobt_buf_ops; - break; - case XFS_ABTC_CRC_MAGIC: - case XFS_ABTC_MAGIC: - bp->b_ops = &xfs_cntbt_buf_ops; - break; - case XFS_IBT_CRC_MAGIC: - case XFS_IBT_MAGIC: - bp->b_ops = &xfs_inobt_buf_ops; - break; - case XFS_FIBT_CRC_MAGIC: - case XFS_FIBT_MAGIC: - bp->b_ops = &xfs_finobt_buf_ops; - break; - case XFS_BMAP_CRC_MAGIC: - case XFS_BMAP_MAGIC: - bp->b_ops = &xfs_bmbt_buf_ops; - break; - case XFS_RMAP_CRC_MAGIC: - bp->b_ops = &xfs_rmapbt_buf_ops; - break; - case XFS_REFC_CRC_MAGIC: - bp->b_ops = &xfs_refcountbt_buf_ops; - break; - default: - warnmsg = "Bad btree block magic!"; - break; - } - break; - case XFS_BLFT_AGF_BUF: - if (magic32 != XFS_AGF_MAGIC) { - warnmsg = "Bad AGF block magic!"; - break; - } - bp->b_ops = &xfs_agf_buf_ops; - break; - case XFS_BLFT_AGFL_BUF: - if (magic32 != XFS_AGFL_MAGIC) { - warnmsg = "Bad AGFL block magic!"; - break; - } - bp->b_ops = &xfs_agfl_buf_ops; - break; - case XFS_BLFT_AGI_BUF: - if (magic32 != XFS_AGI_MAGIC) { - warnmsg = "Bad AGI block magic!"; - break; - } - bp->b_ops = &xfs_agi_buf_ops; - break; - case XFS_BLFT_UDQUOT_BUF: - case XFS_BLFT_PDQUOT_BUF: - case XFS_BLFT_GDQUOT_BUF: -#ifdef CONFIG_XFS_QUOTA - if (magic16 != XFS_DQUOT_MAGIC) { - warnmsg = "Bad DQUOT block magic!"; - break; - } - bp->b_ops = &xfs_dquot_buf_ops; -#else - xfs_alert(mp, - "Trying to recover dquots without QUOTA support built in!"); - ASSERT(0); -#endif - break; - case XFS_BLFT_DINO_BUF: - if (magic16 != XFS_DINODE_MAGIC) { - warnmsg = "Bad INODE block magic!"; - break; - } - bp->b_ops = &xfs_inode_buf_ops; - break; - case XFS_BLFT_SYMLINK_BUF: - if (magic32 != XFS_SYMLINK_MAGIC) { - warnmsg = "Bad symlink block magic!"; - break; - } - bp->b_ops = &xfs_symlink_buf_ops; - break; - case XFS_BLFT_DIR_BLOCK_BUF: - if (magic32 != XFS_DIR2_BLOCK_MAGIC && - magic32 != XFS_DIR3_BLOCK_MAGIC) { - warnmsg = "Bad dir block magic!"; - break; - } - bp->b_ops = &xfs_dir3_block_buf_ops; - break; - case XFS_BLFT_DIR_DATA_BUF: - if (magic32 != XFS_DIR2_DATA_MAGIC && - magic32 != XFS_DIR3_DATA_MAGIC) { - warnmsg = "Bad dir data magic!"; - break; - } - bp->b_ops = &xfs_dir3_data_buf_ops; - break; - case XFS_BLFT_DIR_FREE_BUF: - if (magic32 != XFS_DIR2_FREE_MAGIC && - magic32 != XFS_DIR3_FREE_MAGIC) { - warnmsg = "Bad dir3 free magic!"; - break; - } - bp->b_ops = &xfs_dir3_free_buf_ops; - break; - case XFS_BLFT_DIR_LEAF1_BUF: - if (magicda != XFS_DIR2_LEAF1_MAGIC && - magicda != XFS_DIR3_LEAF1_MAGIC) { - warnmsg = "Bad dir leaf1 magic!"; - break; - } - bp->b_ops = &xfs_dir3_leaf1_buf_ops; - break; - case XFS_BLFT_DIR_LEAFN_BUF: - if (magicda != XFS_DIR2_LEAFN_MAGIC && - magicda != XFS_DIR3_LEAFN_MAGIC) { - warnmsg = "Bad dir leafn magic!"; - break; - } - bp->b_ops = &xfs_dir3_leafn_buf_ops; - break; - case XFS_BLFT_DA_NODE_BUF: - if (magicda != XFS_DA_NODE_MAGIC && - magicda != XFS_DA3_NODE_MAGIC) { - warnmsg = "Bad da node magic!"; - break; - } - bp->b_ops = &xfs_da3_node_buf_ops; - break; - case XFS_BLFT_ATTR_LEAF_BUF: - if (magicda != XFS_ATTR_LEAF_MAGIC && - magicda != XFS_ATTR3_LEAF_MAGIC) { - warnmsg = "Bad attr leaf magic!"; - break; - } - bp->b_ops = 
&xfs_attr3_leaf_buf_ops; - break; - case XFS_BLFT_ATTR_RMT_BUF: - if (magic32 != XFS_ATTR3_RMT_MAGIC) { - warnmsg = "Bad attr remote magic!"; - break; - } - bp->b_ops = &xfs_attr3_rmt_buf_ops; - break; - case XFS_BLFT_SB_BUF: - if (magic32 != XFS_SB_MAGIC) { - warnmsg = "Bad SB block magic!"; - break; - } - bp->b_ops = &xfs_sb_buf_ops; - break; -#ifdef CONFIG_XFS_RT - case XFS_BLFT_RTBITMAP_BUF: - case XFS_BLFT_RTSUMMARY_BUF: - /* no magic numbers for verification of RT buffers */ - bp->b_ops = &xfs_rtbuf_ops; - break; -#endif /* CONFIG_XFS_RT */ - default: - xfs_warn(mp, "Unknown buffer type %d!", - xfs_blft_from_flags(buf_f)); - break; - } - - /* - * Nothing else to do in the case of a NULL current LSN as this means - * the buffer is more recent than the change in the log and will be - * skipped. - */ - if (current_lsn == NULLCOMMITLSN) - return; - - if (warnmsg) { - xfs_warn(mp, warnmsg); - ASSERT(0); - } - - /* - * We must update the metadata LSN of the buffer as it is written out to - * ensure that older transactions never replay over this one and corrupt - * the buffer. This can occur if log recovery is interrupted at some - * point after the current transaction completes, at which point a - * subsequent mount starts recovery from the beginning. - * - * Write verifiers update the metadata LSN from log items attached to - * the buffer. Therefore, initialize a bli purely to carry the LSN to - * the verifier. We'll clean it up in our ->iodone() callback. - */ - if (bp->b_ops) { - struct xfs_buf_log_item *bip; - - ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone); - bp->b_iodone = xlog_recover_iodone; - xfs_buf_item_init(bp, mp); - bip = bp->b_log_item; - bip->bli_item.li_lsn = current_lsn; - } -} - -/* - * Perform a 'normal' buffer recovery. Each logged region of the - * buffer should be copied over the corresponding region in the - * given buffer. The bitmap in the buf log format structure indicates - * where to place the logged data. - */ -STATIC void -xlog_recover_do_reg_buffer( - struct xfs_mount *mp, - xlog_recover_item_t *item, - struct xfs_buf *bp, - xfs_buf_log_format_t *buf_f, - xfs_lsn_t current_lsn) -{ - int i; - int bit; - int nbits; - xfs_failaddr_t fa; - const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot); - - trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); - - bit = 0; - i = 1; /* 0 is the buf format structure */ - while (1) { - bit = xfs_next_bit(buf_f->blf_data_map, - buf_f->blf_map_size, bit); - if (bit == -1) - break; - nbits = xfs_contig_bits(buf_f->blf_data_map, - buf_f->blf_map_size, bit); - ASSERT(nbits > 0); - ASSERT(item->ri_buf[i].i_addr != NULL); - ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); - ASSERT(BBTOB(bp->b_length) >= - ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); - - /* - * The dirty regions logged in the buffer, even though - * contiguous, may span multiple chunks. This is because the - * dirty region may span a physical page boundary in a buffer - * and hence be split into two separate vectors for writing into - * the log. Hence we need to trim nbits back to the length of - * the current region being copied out of the log. - */ - if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) - nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; - - /* - * Do a sanity check if this is a dquot buffer. Just checking - * the first dquot in the buffer should do. XXXThis is - * probably a good thing to do for other buf types also. 
- */ - fa = NULL; - if (buf_f->blf_flags & - (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { - if (item->ri_buf[i].i_addr == NULL) { - xfs_alert(mp, - "XFS: NULL dquot in %s.", __func__); - goto next; - } - if (item->ri_buf[i].i_len < size_disk_dquot) { - xfs_alert(mp, - "XFS: dquot too small (%d) in %s.", - item->ri_buf[i].i_len, __func__); - goto next; - } - fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, - -1, 0); - if (fa) { - xfs_alert(mp, - "dquot corrupt at %pS trying to replay into block 0x%llx", - fa, bp->b_bn); - goto next; - } - } - - memcpy(xfs_buf_offset(bp, - (uint)bit << XFS_BLF_SHIFT), /* dest */ - item->ri_buf[i].i_addr, /* source */ - nbits<<XFS_BLF_SHIFT); /* length */ - next: - i++; - bit += nbits; - } - - /* Shouldn't be any more regions */ - ASSERT(i == item->ri_total); - - xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn); -} - -/* - * Perform a dquot buffer recovery. - * Simple algorithm: if we have found a QUOTAOFF log item of the same type - * (ie. USR or GRP), then just toss this buffer away; don't recover it. - * Else, treat it as a regular buffer and do recovery. - * - * Return false if the buffer was tossed and true if we recovered the buffer to - * indicate to the caller if the buffer needs writing. - */ -STATIC bool -xlog_recover_do_dquot_buffer( - struct xfs_mount *mp, - struct xlog *log, - struct xlog_recover_item *item, - struct xfs_buf *bp, - struct xfs_buf_log_format *buf_f) -{ - uint type; - - trace_xfs_log_recover_buf_dquot_buf(log, buf_f); - - /* - * Filesystems are required to send in quota flags at mount time. - */ - if (!mp->m_qflags) - return false; - - type = 0; - if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) - type |= XFS_DQ_USER; - if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) - type |= XFS_DQ_PROJ; - if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) - type |= XFS_DQ_GROUP; - /* - * This type of quotas was turned off, so ignore this buffer - */ - if (log->l_quotaoffs_flag & type) - return false; - - xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN); - return true; -} - -/* - * This routine replays a modification made to a buffer at runtime. - * There are actually two types of buffer, regular and inode, which - * are handled differently. Inode buffers are handled differently - * in that we only recover a specific set of data from them, namely - * the inode di_next_unlinked fields. This is because all other inode - * data is actually logged via inode records and any data we replay - * here which overlaps that may be stale. - * - * When meta-data buffers are freed at run time we log a buffer item - * with the XFS_BLF_CANCEL bit set to indicate that previous copies - * of the buffer in the log should not be replayed at recovery time. - * This is so that if the blocks covered by the buffer are reused for - * file data before we crash we don't end up replaying old, freed - * meta-data into a user's file. - * - * To handle the cancellation of buffer log items, we make two passes - * over the log during recovery. During the first we build a table of - * those buffers which have been cancelled, and during the second we - * only replay those buffers which do not have corresponding cancel - * records in the table. See xlog_recover_buffer_pass[1,2] above - * for more details on the implementation of the table of cancel records. 
- */ -STATIC int -xlog_recover_buffer_pass2( - struct xlog *log, - struct list_head *buffer_list, - struct xlog_recover_item *item, - xfs_lsn_t current_lsn) -{ - xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; - xfs_mount_t *mp = log->l_mp; - xfs_buf_t *bp; - int error; - uint buf_flags; - xfs_lsn_t lsn; - - /* - * In this pass we only want to recover all the buffers which have - * not been cancelled and are not cancellation buffers themselves. - */ - if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, - buf_f->blf_len, buf_f->blf_flags)) { - trace_xfs_log_recover_buf_cancel(log, buf_f); - return 0; - } - - trace_xfs_log_recover_buf_recover(log, buf_f); - - buf_flags = 0; - if (buf_f->blf_flags & XFS_BLF_INODE_BUF) - buf_flags |= XBF_UNMAPPED; - - error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, - buf_flags, &bp, NULL); - if (error) - return error; - - /* - * Recover the buffer only if we get an LSN from it and it's less than - * the lsn of the transaction we are replaying. - * - * Note that we have to be extremely careful of readahead here. - * Readahead does not attach verfiers to the buffers so if we don't - * actually do any replay after readahead because of the LSN we found - * in the buffer if more recent than that current transaction then we - * need to attach the verifier directly. Failure to do so can lead to - * future recovery actions (e.g. EFI and unlinked list recovery) can - * operate on the buffers and they won't get the verifier attached. This - * can lead to blocks on disk having the correct content but a stale - * CRC. - * - * It is safe to assume these clean buffers are currently up to date. - * If the buffer is dirtied by a later transaction being replayed, then - * the verifier will be reset to match whatever recover turns that - * buffer into. - */ - lsn = xlog_recover_get_buf_lsn(mp, bp); - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { - trace_xfs_log_recover_buf_skip(log, buf_f); - xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN); - goto out_release; - } - - if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { - error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); - if (error) - goto out_release; - } else if (buf_f->blf_flags & - (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { - bool dirty; - - dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); - if (!dirty) - goto out_release; - } else { - xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn); - } - - /* - * Perform delayed write on the buffer. Asynchronous writes will be - * slower when taking into account all the buffers to be flushed. - * - * Also make sure that only inode buffers with good sizes stay in - * the buffer cache. The kernel moves inodes in buffers of 1 block - * or inode_cluster_size bytes, whichever is bigger. The inode - * buffers in the log can be a different size if the log was generated - * by an older kernel using unclustered inode buffers or a newer kernel - * running with a different inode cluster size. Regardless, if the - * the inode buffer size isn't max(blocksize, inode_cluster_size) - * for *our* value of inode_cluster_size, then we need to keep - * the buffer out of the buffer cache so that the buffer won't - * overlap with future reads of those inodes. 
- */ - if (XFS_DINODE_MAGIC == - be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && - (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) { - xfs_buf_stale(bp); - error = xfs_bwrite(bp); - } else { - ASSERT(bp->b_mount == mp); - bp->b_iodone = xlog_recover_iodone; - xfs_buf_delwri_queue(bp, buffer_list); - } - -out_release: - xfs_buf_relse(bp); - return error; -} - -/* - * Inode fork owner changes - * - * If we have been told that we have to reparent the inode fork, it's because an - * extent swap operation on a CRC enabled filesystem has been done and we are - * replaying it. We need to walk the BMBT of the appropriate fork and change the - * owners of it. - * - * The complexity here is that we don't have an inode context to work with, so - * after we've replayed the inode we need to instantiate one. This is where the - * fun begins. - * - * We are in the middle of log recovery, so we can't run transactions. That - * means we cannot use cache coherent inode instantiation via xfs_iget(), as - * that will result in the corresponding iput() running the inode through - * xfs_inactive(). If we've just replayed an inode core that changes the link - * count to zero (i.e. it's been unlinked), then xfs_inactive() will run - * transactions (bad!). - * - * So, to avoid this, we instantiate an inode directly from the inode core we've - * just recovered. We have the buffer still locked, and all we really need to - * instantiate is the inode core and the forks being modified. We can do this - * manually, then run the inode btree owner change, and then tear down the - * xfs_inode without having to run any transactions at all. - * - * Also, because we don't have a transaction context available here but need to - * gather all the buffers we modify for writeback so we pass the buffer_list - * instead for the operation to use. 
- */ - -STATIC int -xfs_recover_inode_owner_change( - struct xfs_mount *mp, - struct xfs_dinode *dip, - struct xfs_inode_log_format *in_f, - struct list_head *buffer_list) + const struct xfs_buf_ops *ops) { - struct xfs_inode *ip; - int error; - - ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)); - - ip = xfs_inode_alloc(mp, in_f->ilf_ino); - if (!ip) - return -ENOMEM; - - /* instantiate the inode */ - ASSERT(dip->di_version >= 3); - xfs_inode_from_disk(ip, dip); - - error = xfs_iformat_fork(ip, dip); - if (error) - goto out_free_ip; - - if (!xfs_inode_verify_forks(ip)) { - error = -EFSCORRUPTED; - goto out_free_ip; - } - - if (in_f->ilf_fields & XFS_ILOG_DOWNER) { - ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT); - error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, - ip->i_ino, buffer_list); - if (error) - goto out_free_ip; - } - - if (in_f->ilf_fields & XFS_ILOG_AOWNER) { - ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT); - error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK, - ip->i_ino, buffer_list); - if (error) - goto out_free_ip; - } - -out_free_ip: - xfs_inode_free(ip); - return error; -} - -STATIC int -xlog_recover_inode_pass2( - struct xlog *log, - struct list_head *buffer_list, - struct xlog_recover_item *item, - xfs_lsn_t current_lsn) -{ - struct xfs_inode_log_format *in_f; - xfs_mount_t *mp = log->l_mp; - xfs_buf_t *bp; - xfs_dinode_t *dip; - int len; - char *src; - char *dest; - int error; - int attr_index; - uint fields; - struct xfs_log_dinode *ldip; - uint isize; - int need_free = 0; - - if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { - in_f = item->ri_buf[0].i_addr; - } else { - in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0); - need_free = 1; - error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); - if (error) - goto error; - } - - /* - * Inode buffers can be freed, look out for it, - * and do not replay the inode. - */ - if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, - in_f->ilf_len, 0)) { - error = 0; - trace_xfs_log_recover_inode_cancel(log, in_f); - goto error; - } - trace_xfs_log_recover_inode_recover(log, in_f); - - error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, - 0, &bp, &xfs_inode_buf_ops); - if (error) - goto error; - ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); - dip = xfs_buf_offset(bp, in_f->ilf_boffset); - - /* - * Make sure the place we're flushing out to really looks - * like an inode! - */ - if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) { - xfs_alert(mp, - "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld", - __func__, dip, bp, in_f->ilf_ino); - error = -EFSCORRUPTED; - goto out_release; - } - ldip = item->ri_buf[1].i_addr; - if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) { - xfs_alert(mp, - "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld", - __func__, item, in_f->ilf_ino); - error = -EFSCORRUPTED; - goto out_release; - } - - /* - * If the inode has an LSN in it, recover the inode only if it's less - * than the lsn of the transaction we are replaying. Note: we still - * need to replay an owner change even though the inode is more recent - * than the transaction as there is no guarantee that all the btree - * blocks are more recent than this transaction, too. 
- */ - if (dip->di_version >= 3) { - xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); - - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { - trace_xfs_log_recover_inode_skip(log, in_f); - error = 0; - goto out_owner_change; - } - } - - /* - * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes - * are transactional and if ordering is necessary we can determine that - * more accurately by the LSN field in the V3 inode core. Don't trust - * the inode versions we might be changing them here - use the - * superblock flag to determine whether we need to look at di_flushiter - * to skip replay when the on disk inode is newer than the log one - */ - if (!xfs_sb_version_has_v3inode(&mp->m_sb) && - ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) { - /* - * Deal with the wrap case, DI_MAX_FLUSH is less - * than smaller numbers - */ - if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && - ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) { - /* do nothing */ - } else { - trace_xfs_log_recover_inode_skip(log, in_f); - error = 0; - goto out_release; - } - } - - /* Take the opportunity to reset the flush iteration count */ - ldip->di_flushiter = 0; - - if (unlikely(S_ISREG(ldip->di_mode))) { - if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && - (ldip->di_format != XFS_DINODE_FMT_BTREE)) { - XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", - XFS_ERRLEVEL_LOW, mp, ldip, - sizeof(*ldip)); - xfs_alert(mp, - "%s: Bad regular inode log record, rec ptr "PTR_FMT", " - "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld", - __func__, item, dip, bp, in_f->ilf_ino); - error = -EFSCORRUPTED; - goto out_release; - } - } else if (unlikely(S_ISDIR(ldip->di_mode))) { - if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && - (ldip->di_format != XFS_DINODE_FMT_BTREE) && - (ldip->di_format != XFS_DINODE_FMT_LOCAL)) { - XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", - XFS_ERRLEVEL_LOW, mp, ldip, - sizeof(*ldip)); - xfs_alert(mp, - "%s: Bad dir inode log record, rec ptr "PTR_FMT", " - "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld", - __func__, item, dip, bp, in_f->ilf_ino); - error = -EFSCORRUPTED; - goto out_release; - } - } - if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){ - XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", - XFS_ERRLEVEL_LOW, mp, ldip, - sizeof(*ldip)); - xfs_alert(mp, - "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", " - "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld", - __func__, item, dip, bp, in_f->ilf_ino, - ldip->di_nextents + ldip->di_anextents, - ldip->di_nblocks); - error = -EFSCORRUPTED; - goto out_release; - } - if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) { - XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", - XFS_ERRLEVEL_LOW, mp, ldip, - sizeof(*ldip)); - xfs_alert(mp, - "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", " - "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__, - item, dip, bp, in_f->ilf_ino, ldip->di_forkoff); - error = -EFSCORRUPTED; - goto out_release; - } - isize = xfs_log_dinode_size(mp); - if (unlikely(item->ri_buf[1].i_len > isize)) { - XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", - XFS_ERRLEVEL_LOW, mp, ldip, - sizeof(*ldip)); - xfs_alert(mp, - "%s: Bad inode log record length %d, rec ptr "PTR_FMT, - __func__, item->ri_buf[1].i_len, item); - error = -EFSCORRUPTED; - goto out_release; - } - - /* recover the log dinode inode into the on disk inode */ - xfs_log_dinode_to_disk(ldip, dip); - - fields = in_f->ilf_fields; - if (fields & 
XFS_ILOG_DEV) - xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); - - if (in_f->ilf_size == 2) - goto out_owner_change; - len = item->ri_buf[2].i_len; - src = item->ri_buf[2].i_addr; - ASSERT(in_f->ilf_size <= 4); - ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); - ASSERT(!(fields & XFS_ILOG_DFORK) || - (len == in_f->ilf_dsize)); - - switch (fields & XFS_ILOG_DFORK) { - case XFS_ILOG_DDATA: - case XFS_ILOG_DEXT: - memcpy(XFS_DFORK_DPTR(dip), src, len); - break; - - case XFS_ILOG_DBROOT: - xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, - (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip), - XFS_DFORK_DSIZE(dip, mp)); - break; - - default: - /* - * There are no data fork flags set. - */ - ASSERT((fields & XFS_ILOG_DFORK) == 0); - break; - } - - /* - * If we logged any attribute data, recover it. There may or - * may not have been any other non-core data logged in this - * transaction. - */ - if (in_f->ilf_fields & XFS_ILOG_AFORK) { - if (in_f->ilf_fields & XFS_ILOG_DFORK) { - attr_index = 3; - } else { - attr_index = 2; - } - len = item->ri_buf[attr_index].i_len; - src = item->ri_buf[attr_index].i_addr; - ASSERT(len == in_f->ilf_asize); - - switch (in_f->ilf_fields & XFS_ILOG_AFORK) { - case XFS_ILOG_ADATA: - case XFS_ILOG_AEXT: - dest = XFS_DFORK_APTR(dip); - ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); - memcpy(dest, src, len); - break; - - case XFS_ILOG_ABROOT: - dest = XFS_DFORK_APTR(dip); - xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, - len, (xfs_bmdr_block_t*)dest, - XFS_DFORK_ASIZE(dip, mp)); - break; - - default: - xfs_warn(log->l_mp, "%s: Invalid flag", __func__); - ASSERT(0); - error = -EFSCORRUPTED; - goto out_release; - } - } - -out_owner_change: - /* Recover the swapext owner change unless inode has been deleted */ - if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) && - (dip->di_mode != 0)) - error = xfs_recover_inode_owner_change(mp, dip, in_f, - buffer_list); - /* re-generate the checksum. */ - xfs_dinode_calc_crc(log->l_mp, dip); - - ASSERT(bp->b_mount == mp); - bp->b_iodone = xlog_recover_iodone; - xfs_buf_delwri_queue(bp, buffer_list); - -out_release: - xfs_buf_relse(bp); -error: - if (need_free) - kmem_free(in_f); - return error; -} - -/* - * Recover QUOTAOFF records. We simply make a note of it in the xlog - * structure, so that we know not to do any dquot item or dquot buffer recovery, - * of that type. - */ -STATIC int -xlog_recover_quotaoff_pass1( - struct xlog *log, - struct xlog_recover_item *item) -{ - xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; - ASSERT(qoff_f); - - /* - * The logitem format's flag tells us if this was user quotaoff, - * group/project quotaoff or both. - */ - if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) - log->l_quotaoffs_flag |= XFS_DQ_USER; - if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) - log->l_quotaoffs_flag |= XFS_DQ_PROJ; - if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) - log->l_quotaoffs_flag |= XFS_DQ_GROUP; - - return 0; -} - -/* - * Recover a dquot record - */ -STATIC int -xlog_recover_dquot_pass2( - struct xlog *log, - struct list_head *buffer_list, - struct xlog_recover_item *item, - xfs_lsn_t current_lsn) -{ - xfs_mount_t *mp = log->l_mp; - xfs_buf_t *bp; - struct xfs_disk_dquot *ddq, *recddq; - xfs_failaddr_t fa; - int error; - xfs_dq_logformat_t *dq_f; - uint type; - - - /* - * Filesystems are required to send in quota flags at mount time. 
- */ - if (mp->m_qflags == 0) - return 0; - - recddq = item->ri_buf[1].i_addr; - if (recddq == NULL) { - xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); - return -EFSCORRUPTED; - } - if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) { - xfs_alert(log->l_mp, "dquot too small (%d) in %s.", - item->ri_buf[1].i_len, __func__); - return -EFSCORRUPTED; - } - - /* - * This type of quotas was turned off, so ignore this record. - */ - type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); - ASSERT(type); - if (log->l_quotaoffs_flag & type) - return 0; - - /* - * At this point we know that quota was _not_ turned off. - * Since the mount flags are not indicating to us otherwise, this - * must mean that quota is on, and the dquot needs to be replayed. - * Remember that we may not have fully recovered the superblock yet, - * so we can't do the usual trick of looking at the SB quota bits. - * - * The other possibility, of course, is that the quota subsystem was - * removed since the last mount - ENOSYS. - */ - dq_f = item->ri_buf[0].i_addr; - ASSERT(dq_f); - fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0); - if (fa) { - xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS", - dq_f->qlf_id, fa); - return -EFSCORRUPTED; - } - ASSERT(dq_f->qlf_len == 1); - - /* - * At this point we are assuming that the dquots have been allocated - * and hence the buffer has valid dquots stamped in it. It should, - * therefore, pass verifier validation. If the dquot is bad, then - * we'll return an error here, so we don't need to specifically check - * the dquot in the buffer after the verifier has run. - */ - error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, - XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, - &xfs_dquot_buf_ops); - if (error) - return error; - - ASSERT(bp); - ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); - - /* - * If the dquot has an LSN in it, recover the dquot only if it's less - * than the lsn of the transaction we are replaying. - */ - if (xfs_sb_version_hascrc(&mp->m_sb)) { - struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; - xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); - - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { - goto out_release; - } - } - - memcpy(ddq, recddq, item->ri_buf[1].i_len); - if (xfs_sb_version_hascrc(&mp->m_sb)) { - xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), - XFS_DQUOT_CRC_OFF); - } - - ASSERT(dq_f->qlf_size == 2); - ASSERT(bp->b_mount == mp); - bp->b_iodone = xlog_recover_iodone; - xfs_buf_delwri_queue(bp, buffer_list); - -out_release: - xfs_buf_relse(bp); - return 0; -} - -/* - * This routine is called to create an in-core extent free intent - * item from the efi format structure which was logged on disk. - * It allocates an in-core efi, copies the extents from the format - * structure into it, and adds the efi to the AIL with the given - * LSN. - */ -STATIC int -xlog_recover_efi_pass2( - struct xlog *log, - struct xlog_recover_item *item, - xfs_lsn_t lsn) -{ - int error; - struct xfs_mount *mp = log->l_mp; - struct xfs_efi_log_item *efip; - struct xfs_efi_log_format *efi_formatp; - - efi_formatp = item->ri_buf[0].i_addr; - - efip = xfs_efi_init(mp, efi_formatp->efi_nextents); - error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format); - if (error) { - xfs_efi_item_free(efip); - return error; - } - atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); - - spin_lock(&log->l_ailp->ail_lock); - /* - * The EFI has two references.
One for the EFD and one for EFI to ensure - * it makes it into the AIL. Insert the EFI into the AIL directly and - * drop the EFI reference. Note that xfs_trans_ail_update() drops the - * AIL lock. - */ - xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn); - xfs_efi_release(efip); - return 0; -} - - -/* - * This routine is called when an EFD format structure is found in a committed - * transaction in the log. Its purpose is to cancel the corresponding EFI if it - * was still in the log. To do this it searches the AIL for the EFI with an id - * equal to that in the EFD format structure. If we find it we drop the EFD - * reference, which removes the EFI from the AIL and frees it. - */ -STATIC int -xlog_recover_efd_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - xfs_efd_log_format_t *efd_formatp; - xfs_efi_log_item_t *efip = NULL; - struct xfs_log_item *lip; - uint64_t efi_id; - struct xfs_ail_cursor cur; - struct xfs_ail *ailp = log->l_ailp; - - efd_formatp = item->ri_buf[0].i_addr; - ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + - ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || - (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + - ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); - efi_id = efd_formatp->efd_efi_id; - - /* - * Search for the EFI with the id in the EFD format structure in the - * AIL. - */ - spin_lock(&ailp->ail_lock); - lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); - while (lip != NULL) { - if (lip->li_type == XFS_LI_EFI) { - efip = (xfs_efi_log_item_t *)lip; - if (efip->efi_format.efi_id == efi_id) { - /* - * Drop the EFD reference to the EFI. This - * removes the EFI from the AIL and frees it. - */ - spin_unlock(&ailp->ail_lock); - xfs_efi_release(efip); - spin_lock(&ailp->ail_lock); - break; - } - } - lip = xfs_trans_ail_cursor_next(ailp, &cur); - } - - xfs_trans_ail_cursor_done(&cur); - spin_unlock(&ailp->ail_lock); - - return 0; -} - -/* - * This routine is called to create an in-core extent rmap update - * item from the rui format structure which was logged on disk. - * It allocates an in-core rui, copies the extents from the format - * structure into it, and adds the rui to the AIL with the given - * LSN. - */ -STATIC int -xlog_recover_rui_pass2( - struct xlog *log, - struct xlog_recover_item *item, - xfs_lsn_t lsn) -{ - int error; - struct xfs_mount *mp = log->l_mp; - struct xfs_rui_log_item *ruip; - struct xfs_rui_log_format *rui_formatp; - - rui_formatp = item->ri_buf[0].i_addr; - - ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); - error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format); - if (error) { - xfs_rui_item_free(ruip); - return error; - } - atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); - - spin_lock(&log->l_ailp->ail_lock); - /* - * The RUI has two references. One for the RUD and one for RUI to ensure - * it makes it into the AIL. Insert the RUI into the AIL directly and - * drop the RUI reference. Note that xfs_trans_ail_update() drops the - * AIL lock. - */ - xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn); - xfs_rui_release(ruip); - return 0; -} - - -/* - * This routine is called when an RUD format structure is found in a committed - * transaction in the log. Its purpose is to cancel the corresponding RUI if it - * was still in the log. To do this it searches the AIL for the RUI with an id - * equal to that in the RUD format structure. 
If we find it we drop the RUD - * reference, which removes the RUI from the AIL and frees it. - */ -STATIC int -xlog_recover_rud_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - struct xfs_rud_log_format *rud_formatp; - struct xfs_rui_log_item *ruip = NULL; - struct xfs_log_item *lip; - uint64_t rui_id; - struct xfs_ail_cursor cur; - struct xfs_ail *ailp = log->l_ailp; - - rud_formatp = item->ri_buf[0].i_addr; - ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format)); - rui_id = rud_formatp->rud_rui_id; - - /* - * Search for the RUI with the id in the RUD format structure in the - * AIL. - */ - spin_lock(&ailp->ail_lock); - lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); - while (lip != NULL) { - if (lip->li_type == XFS_LI_RUI) { - ruip = (struct xfs_rui_log_item *)lip; - if (ruip->rui_format.rui_id == rui_id) { - /* - * Drop the RUD reference to the RUI. This - * removes the RUI from the AIL and frees it. - */ - spin_unlock(&ailp->ail_lock); - xfs_rui_release(ruip); - spin_lock(&ailp->ail_lock); - break; - } - } - lip = xfs_trans_ail_cursor_next(ailp, &cur); - } - - xfs_trans_ail_cursor_done(&cur); - spin_unlock(&ailp->ail_lock); - - return 0; -} - -/* - * Copy an CUI format buffer from the given buf, and into the destination - * CUI format structure. The CUI/CUD items were designed not to need any - * special alignment handling. - */ -static int -xfs_cui_copy_format( - struct xfs_log_iovec *buf, - struct xfs_cui_log_format *dst_cui_fmt) -{ - struct xfs_cui_log_format *src_cui_fmt; - uint len; - - src_cui_fmt = buf->i_addr; - len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents); - - if (buf->i_len == len) { - memcpy(dst_cui_fmt, src_cui_fmt, len); - return 0; - } - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); - return -EFSCORRUPTED; -} - -/* - * This routine is called to create an in-core extent refcount update - * item from the cui format structure which was logged on disk. - * It allocates an in-core cui, copies the extents from the format - * structure into it, and adds the cui to the AIL with the given - * LSN. - */ -STATIC int -xlog_recover_cui_pass2( - struct xlog *log, - struct xlog_recover_item *item, - xfs_lsn_t lsn) -{ - int error; - struct xfs_mount *mp = log->l_mp; - struct xfs_cui_log_item *cuip; - struct xfs_cui_log_format *cui_formatp; - - cui_formatp = item->ri_buf[0].i_addr; - - cuip = xfs_cui_init(mp, cui_formatp->cui_nextents); - error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format); - if (error) { - xfs_cui_item_free(cuip); - return error; - } - atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents); - - spin_lock(&log->l_ailp->ail_lock); - /* - * The CUI has two references. One for the CUD and one for CUI to ensure - * it makes it into the AIL. Insert the CUI into the AIL directly and - * drop the CUI reference. Note that xfs_trans_ail_update() drops the - * AIL lock. - */ - xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn); - xfs_cui_release(cuip); - return 0; -} - - -/* - * This routine is called when an CUD format structure is found in a committed - * transaction in the log. Its purpose is to cancel the corresponding CUI if it - * was still in the log. To do this it searches the AIL for the CUI with an id - * equal to that in the CUD format structure. If we find it we drop the CUD - * reference, which removes the CUI from the AIL and frees it. 
- */ -STATIC int -xlog_recover_cud_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - struct xfs_cud_log_format *cud_formatp; - struct xfs_cui_log_item *cuip = NULL; - struct xfs_log_item *lip; - uint64_t cui_id; - struct xfs_ail_cursor cur; - struct xfs_ail *ailp = log->l_ailp; - - cud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) { - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); - return -EFSCORRUPTED; - } - cui_id = cud_formatp->cud_cui_id; - - /* - * Search for the CUI with the id in the CUD format structure in the - * AIL. - */ - spin_lock(&ailp->ail_lock); - lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); - while (lip != NULL) { - if (lip->li_type == XFS_LI_CUI) { - cuip = (struct xfs_cui_log_item *)lip; - if (cuip->cui_format.cui_id == cui_id) { - /* - * Drop the CUD reference to the CUI. This - * removes the CUI from the AIL and frees it. - */ - spin_unlock(&ailp->ail_lock); - xfs_cui_release(cuip); - spin_lock(&ailp->ail_lock); - break; - } - } - lip = xfs_trans_ail_cursor_next(ailp, &cur); - } - - xfs_trans_ail_cursor_done(&cur); - spin_unlock(&ailp->ail_lock); - - return 0; -} - -/* - * Copy an BUI format buffer from the given buf, and into the destination - * BUI format structure. The BUI/BUD items were designed not to need any - * special alignment handling. - */ -static int -xfs_bui_copy_format( - struct xfs_log_iovec *buf, - struct xfs_bui_log_format *dst_bui_fmt) -{ - struct xfs_bui_log_format *src_bui_fmt; - uint len; - - src_bui_fmt = buf->i_addr; - len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents); - - if (buf->i_len == len) { - memcpy(dst_bui_fmt, src_bui_fmt, len); - return 0; - } - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); - return -EFSCORRUPTED; -} - -/* - * This routine is called to create an in-core extent bmap update - * item from the bui format structure which was logged on disk. - * It allocates an in-core bui, copies the extents from the format - * structure into it, and adds the bui to the AIL with the given - * LSN. - */ -STATIC int -xlog_recover_bui_pass2( - struct xlog *log, - struct xlog_recover_item *item, - xfs_lsn_t lsn) -{ - int error; - struct xfs_mount *mp = log->l_mp; - struct xfs_bui_log_item *buip; - struct xfs_bui_log_format *bui_formatp; - - bui_formatp = item->ri_buf[0].i_addr; - - if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); - return -EFSCORRUPTED; - } - buip = xfs_bui_init(mp); - error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format); - if (error) { - xfs_bui_item_free(buip); - return error; - } - atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents); - - spin_lock(&log->l_ailp->ail_lock); - /* - * The RUI has two references. One for the RUD and one for RUI to ensure - * it makes it into the AIL. Insert the RUI into the AIL directly and - * drop the RUI reference. Note that xfs_trans_ail_update() drops the - * AIL lock. - */ - xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn); - xfs_bui_release(buip); - return 0; -} - - -/* - * This routine is called when an BUD format structure is found in a committed - * transaction in the log. Its purpose is to cancel the corresponding BUI if it - * was still in the log. To do this it searches the AIL for the BUI with an id - * equal to that in the BUD format structure. If we find it we drop the BUD - * reference, which removes the BUI from the AIL and frees it. 
- */ -STATIC int -xlog_recover_bud_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - struct xfs_bud_log_format *bud_formatp; - struct xfs_bui_log_item *buip = NULL; - struct xfs_log_item *lip; - uint64_t bui_id; - struct xfs_ail_cursor cur; - struct xfs_ail *ailp = log->l_ailp; - - bud_formatp = item->ri_buf[0].i_addr; - if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) { - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); - return -EFSCORRUPTED; - } - bui_id = bud_formatp->bud_bui_id; - - /* - * Search for the BUI with the id in the BUD format structure in the - * AIL. - */ - spin_lock(&ailp->ail_lock); - lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); - while (lip != NULL) { - if (lip->li_type == XFS_LI_BUI) { - buip = (struct xfs_bui_log_item *)lip; - if (buip->bui_format.bui_id == bui_id) { - /* - * Drop the BUD reference to the BUI. This - * removes the BUI from the AIL and frees it. - */ - spin_unlock(&ailp->ail_lock); - xfs_bui_release(buip); - spin_lock(&ailp->ail_lock); - break; - } - } - lip = xfs_trans_ail_cursor_next(ailp, &cur); - } - - xfs_trans_ail_cursor_done(&cur); - spin_unlock(&ailp->ail_lock); - - return 0; -} - -/* - * This routine is called when an inode create format structure is found in a - * committed transaction in the log. Its purpose is to initialise the inodes - * being allocated on disk. This requires us to get inode cluster buffers that - * match the range to be initialised, stamped with inode templates and written - * by delayed write so that subsequent modifications will hit the cached buffer - * and only need writing out at the end of recovery. - */ -STATIC int -xlog_recover_do_icreate_pass2( - struct xlog *log, - struct list_head *buffer_list, - xlog_recover_item_t *item) -{ - struct xfs_mount *mp = log->l_mp; - struct xfs_icreate_log *icl; - struct xfs_ino_geometry *igeo = M_IGEO(mp); - xfs_agnumber_t agno; - xfs_agblock_t agbno; - unsigned int count; - unsigned int isize; - xfs_agblock_t length; - int bb_per_cluster; - int cancel_count; - int nbufs; - int i; - - icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; - if (icl->icl_type != XFS_LI_ICREATE) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); - return -EINVAL; - } - - if (icl->icl_size != 1) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); - return -EINVAL; - } - - agno = be32_to_cpu(icl->icl_ag); - if (agno >= mp->m_sb.sb_agcount) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); - return -EINVAL; - } - agbno = be32_to_cpu(icl->icl_agbno); - if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); - return -EINVAL; - } - isize = be32_to_cpu(icl->icl_isize); - if (isize != mp->m_sb.sb_inodesize) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); - return -EINVAL; - } - count = be32_to_cpu(icl->icl_count); - if (!count) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); - return -EINVAL; - } - length = be32_to_cpu(icl->icl_length); - if (!length || length >= mp->m_sb.sb_agblocks) { - xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); - return -EINVAL; - } - - /* - * The inode chunk is either full or sparse and we only support - * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
- */ - if (length != igeo->ialloc_blks && - length != igeo->ialloc_min_blks) { - xfs_warn(log->l_mp, - "%s: unsupported chunk length", __FUNCTION__); - return -EINVAL; - } - - /* verify inode count is consistent with extent length */ - if ((count >> mp->m_sb.sb_inopblog) != length) { - xfs_warn(log->l_mp, - "%s: inconsistent inode count and chunk length", - __FUNCTION__); - return -EINVAL; - } - - /* - * The icreate transaction can cover multiple cluster buffers and these - * buffers could have been freed and reused. Check the individual - * buffers for cancellation so we don't overwrite anything written after - * a cancellation. - */ - bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster); - nbufs = length / igeo->blocks_per_cluster; - for (i = 0, cancel_count = 0; i < nbufs; i++) { - xfs_daddr_t daddr; - - daddr = XFS_AGB_TO_DADDR(mp, agno, - agbno + i * igeo->blocks_per_cluster); - if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0)) - cancel_count++; - } - - /* - * We currently only use icreate for a single allocation at a time. This - * means we should expect either all or none of the buffers to be - * cancelled. Be conservative and skip replay if at least one buffer is - * cancelled, but warn the user that something is awry if the buffers - * are not consistent. - * - * XXX: This must be refined to only skip cancelled clusters once we use - * icreate for multiple chunk allocations. - */ - ASSERT(!cancel_count || cancel_count == nbufs); - if (cancel_count) { - if (cancel_count != nbufs) - xfs_warn(mp, - "WARNING: partial inode chunk cancellation, skipped icreate."); - trace_xfs_log_recover_icreate_cancel(log, icl); - return 0; - } - - trace_xfs_log_recover_icreate_recover(log, icl); - return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno, - length, be32_to_cpu(icl->icl_gen)); -} - -STATIC void -xlog_recover_buffer_ra_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; - struct xfs_mount *mp = log->l_mp; - - if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno, - buf_f->blf_len, buf_f->blf_flags)) { - return; - } - - xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno, - buf_f->blf_len, NULL); -} - -STATIC void -xlog_recover_inode_ra_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - struct xfs_inode_log_format ilf_buf; - struct xfs_inode_log_format *ilfp; - struct xfs_mount *mp = log->l_mp; - int error; - - if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { - ilfp = item->ri_buf[0].i_addr; - } else { - ilfp = &ilf_buf; - memset(ilfp, 0, sizeof(*ilfp)); - error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp); - if (error) - return; - } - - if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0)) - return; - - xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno, - ilfp->ilf_len, &xfs_inode_buf_ra_ops); -} - -STATIC void -xlog_recover_dquot_ra_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - struct xfs_mount *mp = log->l_mp; - struct xfs_disk_dquot *recddq; - struct xfs_dq_logformat *dq_f; - uint type; - int len; - - - if (mp->m_qflags == 0) - return; - - recddq = item->ri_buf[1].i_addr; - if (recddq == NULL) - return; - if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) - return; - - type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); - ASSERT(type); - if (log->l_quotaoffs_flag & type) - return; - - dq_f = item->ri_buf[0].i_addr; - ASSERT(dq_f); - ASSERT(dq_f->qlf_len == 1); - - len = 
XFS_FSB_TO_BB(mp, dq_f->qlf_len); - if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0)) - return; - - xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len, - &xfs_dquot_buf_ra_ops); -} - -STATIC void -xlog_recover_ra_pass2( - struct xlog *log, - struct xlog_recover_item *item) -{ - switch (ITEM_TYPE(item)) { - case XFS_LI_BUF: - xlog_recover_buffer_ra_pass2(log, item); - break; - case XFS_LI_INODE: - xlog_recover_inode_ra_pass2(log, item); - break; - case XFS_LI_DQUOT: - xlog_recover_dquot_ra_pass2(log, item); - break; - case XFS_LI_EFI: - case XFS_LI_EFD: - case XFS_LI_QUOTAOFF: - case XFS_LI_RUI: - case XFS_LI_RUD: - case XFS_LI_CUI: - case XFS_LI_CUD: - case XFS_LI_BUI: - case XFS_LI_BUD: - default: - break; - } -} - -STATIC int -xlog_recover_commit_pass1( - struct xlog *log, - struct xlog_recover *trans, - struct xlog_recover_item *item) -{ - trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); - - switch (ITEM_TYPE(item)) { - case XFS_LI_BUF: - return xlog_recover_buffer_pass1(log, item); - case XFS_LI_QUOTAOFF: - return xlog_recover_quotaoff_pass1(log, item); - case XFS_LI_INODE: - case XFS_LI_EFI: - case XFS_LI_EFD: - case XFS_LI_DQUOT: - case XFS_LI_ICREATE: - case XFS_LI_RUI: - case XFS_LI_RUD: - case XFS_LI_CUI: - case XFS_LI_CUD: - case XFS_LI_BUI: - case XFS_LI_BUD: - /* nothing to do in pass 1 */ - return 0; - default: - xfs_warn(log->l_mp, "%s: invalid item type (%d)", - __func__, ITEM_TYPE(item)); - ASSERT(0); - return -EFSCORRUPTED; - } -} - -STATIC int -xlog_recover_commit_pass2( - struct xlog *log, - struct xlog_recover *trans, - struct list_head *buffer_list, - struct xlog_recover_item *item) -{ - trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); - - switch (ITEM_TYPE(item)) { - case XFS_LI_BUF: - return xlog_recover_buffer_pass2(log, buffer_list, item, - trans->r_lsn); - case XFS_LI_INODE: - return xlog_recover_inode_pass2(log, buffer_list, item, - trans->r_lsn); - case XFS_LI_EFI: - return xlog_recover_efi_pass2(log, item, trans->r_lsn); - case XFS_LI_EFD: - return xlog_recover_efd_pass2(log, item); - case XFS_LI_RUI: - return xlog_recover_rui_pass2(log, item, trans->r_lsn); - case XFS_LI_RUD: - return xlog_recover_rud_pass2(log, item); - case XFS_LI_CUI: - return xlog_recover_cui_pass2(log, item, trans->r_lsn); - case XFS_LI_CUD: - return xlog_recover_cud_pass2(log, item); - case XFS_LI_BUI: - return xlog_recover_bui_pass2(log, item, trans->r_lsn); - case XFS_LI_BUD: - return xlog_recover_bud_pass2(log, item); - case XFS_LI_DQUOT: - return xlog_recover_dquot_pass2(log, buffer_list, item, - trans->r_lsn); - case XFS_LI_ICREATE: - return xlog_recover_do_icreate_pass2(log, buffer_list, item); - case XFS_LI_QUOTAOFF: - /* nothing to do in pass2 */ - return 0; - default: - xfs_warn(log->l_mp, "%s: invalid item type (%d)", - __func__, ITEM_TYPE(item)); - ASSERT(0); - return -EFSCORRUPTED; - } + if (!xlog_is_buffer_cancelled(log, blkno, len)) + xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops); } STATIC int @@ -4072,8 +1967,12 @@ xlog_recover_items_pass2( int error = 0; list_for_each_entry(item, item_list, ri_list) { - error = xlog_recover_commit_pass2(log, trans, - buffer_list, item); + trace_xfs_log_recover_item_recover(log, trans, item, + XLOG_RECOVER_PASS2); + + if (item->ri_ops->commit_pass2) + error = item->ri_ops->commit_pass2(log, buffer_list, + item, trans->r_lsn); if (error) return error; } @@ -4110,12 +2009,16 @@ xlog_recover_commit_trans( return error; list_for_each_entry_safe(item, next, 
&trans->r_itemq, ri_list) { + trace_xfs_log_recover_item_recover(log, trans, item, pass); + switch (pass) { case XLOG_RECOVER_PASS1: - error = xlog_recover_commit_pass1(log, trans, item); + if (item->ri_ops->commit_pass1) + error = item->ri_ops->commit_pass1(log, item); break; case XLOG_RECOVER_PASS2: - xlog_recover_ra_pass2(log, item); + if (item->ri_ops->ra_pass2) + item->ri_ops->ra_pass2(log, item); list_move_tail(&item->ri_list, &ra_list); items_queued++; if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) { @@ -4152,9 +2055,9 @@ STATIC void xlog_recover_add_item( struct list_head *head) { - xlog_recover_item_t *item; + struct xlog_recover_item *item; - item = kmem_zalloc(sizeof(xlog_recover_item_t), 0); + item = kmem_zalloc(sizeof(struct xlog_recover_item), 0); INIT_LIST_HEAD(&item->ri_list); list_add_tail(&item->ri_list, head); } @@ -4166,7 +2069,7 @@ xlog_recover_add_to_cont_trans( char *dp, int len) { - xlog_recover_item_t *item; + struct xlog_recover_item *item; char *ptr, *old_ptr; int old_len; @@ -4189,7 +2092,8 @@ xlog_recover_add_to_cont_trans( } /* take the tail entry */ - item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); + item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, + ri_list); old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; old_len = item->ri_buf[item->ri_cnt-1].i_len; @@ -4223,7 +2127,7 @@ xlog_recover_add_to_trans( int len) { struct xfs_inode_log_format *in_f; /* any will do */ - xlog_recover_item_t *item; + struct xlog_recover_item *item; char *ptr; if (!len) @@ -4259,13 +2163,14 @@ xlog_recover_add_to_trans( in_f = (struct xfs_inode_log_format *)ptr; /* take the tail entry */ - item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); + item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, + ri_list); if (item->ri_total != 0 && item->ri_total == item->ri_cnt) { /* tail item is in use, get a new one */ xlog_recover_add_item(&trans->r_itemq); item = list_entry(trans->r_itemq.prev, - xlog_recover_item_t, ri_list); + struct xlog_recover_item, ri_list); } if (item->ri_total == 0) { /* first region to be added */ @@ -4311,7 +2216,7 @@ STATIC void xlog_recover_free_trans( struct xlog_recover *trans) { - xlog_recover_item_t *item, *n; + struct xlog_recover_item *item, *n; int i; hlist_del_init(&trans->r_list); @@ -4563,180 +2468,6 @@ xlog_recover_process_data( return 0; } -/* Recover the EFI if necessary. */ -STATIC int -xlog_recover_process_efi( - struct xfs_mount *mp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_efi_log_item *efip; - int error; - - /* - * Skip EFIs that we've already processed. - */ - efip = container_of(lip, struct xfs_efi_log_item, efi_item); - if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) - return 0; - - spin_unlock(&ailp->ail_lock); - error = xfs_efi_recover(mp, efip); - spin_lock(&ailp->ail_lock); - - return error; -} - -/* Release the EFI since we're cancelling everything. */ -STATIC void -xlog_recover_cancel_efi( - struct xfs_mount *mp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_efi_log_item *efip; - - efip = container_of(lip, struct xfs_efi_log_item, efi_item); - - spin_unlock(&ailp->ail_lock); - xfs_efi_release(efip); - spin_lock(&ailp->ail_lock); -} - -/* Recover the RUI if necessary. */ -STATIC int -xlog_recover_process_rui( - struct xfs_mount *mp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_rui_log_item *ruip; - int error; - - /* - * Skip RUIs that we've already processed. 
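The hunks above replace the big per-type switch statements with a per-item ops vector: each recovered item type supplies optional commit_pass1/ra_pass2/commit_pass2 hooks and the core loop simply calls whatever is present. A small standalone illustration of that dispatch pattern, using generic names rather than the xlog structures:

#include <stdio.h>

struct item;

struct item_ops {
    const char *name;
    /* either hook may be NULL if the type has nothing to do in that pass */
    int (*commit_pass1)(struct item *);
    int (*commit_pass2)(struct item *);
};

struct item {
    const struct item_ops *ops;
    int payload;
};

static int commit_item(struct item *it, int pass)
{
    int (*fn)(struct item *);

    fn = (pass == 1) ? it->ops->commit_pass1 : it->ops->commit_pass2;
    if (!fn)
        return 0;               /* nothing to do in this pass */
    return fn(it);
}

static int buf_pass2(struct item *it)
{
    printf("replaying %d\n", it->payload);
    return 0;
}

static const struct item_ops buf_ops = {
    .name = "buffer",
    .commit_pass2 = buf_pass2,  /* no pass-1 work for this type */
};

int main(void)
{
    struct item it = { .ops = &buf_ops, .payload = 42 };

    commit_item(&it, 1);        /* skipped, hook is NULL */
    return commit_item(&it, 2);
}

The payoff is visible in the callers: adding a new log item type means filling in a table, not editing every switch in the recovery core.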
- */ - ruip = container_of(lip, struct xfs_rui_log_item, rui_item); - if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags)) - return 0; - - spin_unlock(&ailp->ail_lock); - error = xfs_rui_recover(mp, ruip); - spin_lock(&ailp->ail_lock); - - return error; -} - -/* Release the RUI since we're cancelling everything. */ -STATIC void -xlog_recover_cancel_rui( - struct xfs_mount *mp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_rui_log_item *ruip; - - ruip = container_of(lip, struct xfs_rui_log_item, rui_item); - - spin_unlock(&ailp->ail_lock); - xfs_rui_release(ruip); - spin_lock(&ailp->ail_lock); -} - -/* Recover the CUI if necessary. */ -STATIC int -xlog_recover_process_cui( - struct xfs_trans *parent_tp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_cui_log_item *cuip; - int error; - - /* - * Skip CUIs that we've already processed. - */ - cuip = container_of(lip, struct xfs_cui_log_item, cui_item); - if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags)) - return 0; - - spin_unlock(&ailp->ail_lock); - error = xfs_cui_recover(parent_tp, cuip); - spin_lock(&ailp->ail_lock); - - return error; -} - -/* Release the CUI since we're cancelling everything. */ -STATIC void -xlog_recover_cancel_cui( - struct xfs_mount *mp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_cui_log_item *cuip; - - cuip = container_of(lip, struct xfs_cui_log_item, cui_item); - - spin_unlock(&ailp->ail_lock); - xfs_cui_release(cuip); - spin_lock(&ailp->ail_lock); -} - -/* Recover the BUI if necessary. */ -STATIC int -xlog_recover_process_bui( - struct xfs_trans *parent_tp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_bui_log_item *buip; - int error; - - /* - * Skip BUIs that we've already processed. - */ - buip = container_of(lip, struct xfs_bui_log_item, bui_item); - if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags)) - return 0; - - spin_unlock(&ailp->ail_lock); - error = xfs_bui_recover(parent_tp, buip); - spin_lock(&ailp->ail_lock); - - return error; -} - -/* Release the BUI since we're cancelling everything. */ -STATIC void -xlog_recover_cancel_bui( - struct xfs_mount *mp, - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_bui_log_item *buip; - - buip = container_of(lip, struct xfs_bui_log_item, bui_item); - - spin_unlock(&ailp->ail_lock); - xfs_bui_release(buip); - spin_lock(&ailp->ail_lock); -} - -/* Is this log item a deferred action intent? */ -static inline bool xlog_item_is_intent(struct xfs_log_item *lip) -{ - switch (lip->li_type) { - case XFS_LI_EFI: - case XFS_LI_RUI: - case XFS_LI_CUI: - case XFS_LI_BUI: - return true; - default: - return false; - } -} - /* Take all the collected deferred ops and finish them in order. */ static int xlog_finish_defer_ops( @@ -4771,6 +2502,13 @@ xlog_finish_defer_ops( return xfs_trans_commit(tp); } +/* Is this log item a deferred action intent? */ +static inline bool xlog_item_is_intent(struct xfs_log_item *lip) +{ + return lip->li_ops->iop_recover != NULL && + lip->li_ops->iop_match != NULL; +} + /* * When this is called, all of the log intent items which did not have * corresponding log done items should be in the AIL. 
What we do now @@ -4841,23 +2579,14 @@ xlog_recover_process_intents( /* * NOTE: If your intent processing routine can create more - * deferred ops, you /must/ attach them to the dfops in this - * routine or else those subsequent intents will get + * deferred ops, you /must/ attach them to the transaction in + * this routine or else those subsequent intents will get * replayed in the wrong order! */ - switch (lip->li_type) { - case XFS_LI_EFI: - error = xlog_recover_process_efi(log->l_mp, ailp, lip); - break; - case XFS_LI_RUI: - error = xlog_recover_process_rui(log->l_mp, ailp, lip); - break; - case XFS_LI_CUI: - error = xlog_recover_process_cui(parent_tp, ailp, lip); - break; - case XFS_LI_BUI: - error = xlog_recover_process_bui(parent_tp, ailp, lip); - break; + if (!test_and_set_bit(XFS_LI_RECOVERED, &lip->li_flags)) { + spin_unlock(&ailp->ail_lock); + error = lip->li_ops->iop_recover(lip, parent_tp); + spin_lock(&ailp->ail_lock); } if (error) goto out; @@ -4901,21 +2630,9 @@ xlog_recover_cancel_intents( break; } - switch (lip->li_type) { - case XFS_LI_EFI: - xlog_recover_cancel_efi(log->l_mp, ailp, lip); - break; - case XFS_LI_RUI: - xlog_recover_cancel_rui(log->l_mp, ailp, lip); - break; - case XFS_LI_CUI: - xlog_recover_cancel_cui(log->l_mp, ailp, lip); - break; - case XFS_LI_BUI: - xlog_recover_cancel_bui(log->l_mp, ailp, lip); - break; - } - + spin_unlock(&ailp->ail_lock); + lip->li_ops->iop_release(lip); + spin_lock(&ailp->ail_lock); lip = xfs_trans_ail_cursor_next(ailp, &cur); } @@ -4987,7 +2704,7 @@ xlog_recover_process_one_iunlink( /* * Get the on disk inode to find the next inode in the bucket. */ - error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0); + error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0); if (error) goto fail_iput; diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c index e0f9d3b6abe9..bc66d95c8d4c 100644 --- a/fs/xfs/xfs_message.c +++ b/fs/xfs/xfs_message.c @@ -117,3 +117,25 @@ xfs_hex_dump(const void *p, int length) { print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1); } + +void +xfs_buf_alert_ratelimited( + struct xfs_buf *bp, + const char *rlmsg, + const char *fmt, + ...) +{ + struct xfs_mount *mp = bp->b_mount; + struct va_format vaf; + va_list args; + + /* use the more aggressive per-target rate limit for buffers */ + if (!___ratelimit(&bp->b_target->bt_ioerror_rl, rlmsg)) + return; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + __xfs_printk(KERN_ALERT, mp, &vaf); + va_end(args); +} diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h index 0b05e10995a0..4d9bd6bb63ca 100644 --- a/fs/xfs/xfs_message.h +++ b/fs/xfs/xfs_message.h @@ -31,15 +31,27 @@ void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) } #endif -#define xfs_printk_ratelimited(func, dev, fmt, ...) \ +#define xfs_printk_ratelimited(func, dev, fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ if (__ratelimit(&_rs)) \ - func(dev, fmt, ##__VA_ARGS__); \ + func(dev, fmt, ##__VA_ARGS__); \ } while (0) +#define xfs_printk_once(func, dev, fmt, ...) \ +({ \ + static bool __section(.data.once) __print_once; \ + bool __ret_print_once = !__print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + func(dev, fmt, ##__VA_ARGS__); \ + } \ + unlikely(__ret_print_once); \ +}) + #define xfs_emerg_ratelimited(dev, fmt, ...) \ xfs_printk_ratelimited(xfs_emerg, dev, fmt, ##__VA_ARGS__) #define xfs_alert_ratelimited(dev, fmt, ...) 
\ @@ -57,9 +69,17 @@ do { \ #define xfs_debug_ratelimited(dev, fmt, ...) \ xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__) +#define xfs_warn_once(dev, fmt, ...) \ + xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__) +#define xfs_notice_once(dev, fmt, ...) \ + xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__) + void assfail(struct xfs_mount *mp, char *expr, char *f, int l); void asswarn(struct xfs_mount *mp, char *expr, char *f, int l); extern void xfs_hex_dump(const void *p, int length); +void xfs_buf_alert_ratelimited(struct xfs_buf *bp, const char *rlmsg, + const char *fmt, ...); + #endif /* __XFS_MESSAGE_H */ diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index c5513e5a226a..d5dcf9869860 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1190,39 +1190,6 @@ xfs_log_sbcount(xfs_mount_t *mp) } /* - * Deltas for the inode count are +/-64, hence we use a large batch size - * of 128 so we don't need to take the counter lock on every update. - */ -#define XFS_ICOUNT_BATCH 128 -int -xfs_mod_icount( - struct xfs_mount *mp, - int64_t delta) -{ - percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH); - if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) { - ASSERT(0); - percpu_counter_add(&mp->m_icount, -delta); - return -EINVAL; - } - return 0; -} - -int -xfs_mod_ifree( - struct xfs_mount *mp, - int64_t delta) -{ - percpu_counter_add(&mp->m_ifree, delta); - if (percpu_counter_compare(&mp->m_ifree, 0) < 0) { - ASSERT(0); - percpu_counter_add(&mp->m_ifree, -delta); - return -EINVAL; - } - return 0; -} - -/* * Deltas for the block count can vary from 1 to very large, but lock contention * only occurs on frequent small block count updates such as in the delayed * allocation path for buffered writes (page a time updates). Hence we set @@ -1300,10 +1267,9 @@ xfs_mod_fdblocks( spin_unlock(&mp->m_sb_lock); return 0; } - printk_once(KERN_WARNING - "Filesystem \"%s\": reserve blocks depleted! " - "Consider increasing reserve pool size.", - mp->m_super->s_id); + xfs_warn_once(mp, +"Reserve blocks depleted! Consider increasing reserve pool size."); + fdblocks_enospc: spin_unlock(&mp->m_sb_lock); return -ENOSPC; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index b2e4598fdf7d..3725d25ad97e 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -55,61 +55,25 @@ struct xfs_error_cfg { long retry_timeout; /* in jiffies, -1 = infinite */ }; +/* + * The struct xfsmount layout is optimised to separate read-mostly variables + * from variables that are frequently modified. We put the read-mostly variables + * first, then place all the other variables at the end. + * + * Typically, read-mostly variables are those that are set at mount time and + * never changed again, or only change rarely as a result of things like sysfs + * knobs being tweaked. + */ typedef struct xfs_mount { + struct xfs_sb m_sb; /* copy of fs superblock */ struct super_block *m_super; - - /* - * Bitsets of per-fs metadata that have been checked and/or are sick. - * Callers must hold m_sb_lock to access these two fields. - */ - uint8_t m_fs_checked; - uint8_t m_fs_sick; - /* - * Bitsets of rt metadata that have been checked and/or are sick. - * Callers must hold m_sb_lock to access this field. 
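The new xfs_warn_once/xfs_notice_once wrappers sit on xfs_printk_once, which hides a function-local static flag so the message fires at most once per boot and the statement-expression result reports whether it printed. The same idea in plain C, relying on the GNU statement-expression and ##__VA_ARGS__ extensions the kernel macro also uses, with a generic printf backend instead of the xfs_* helpers:

#include <stdbool.h>
#include <stdio.h>

/* evaluates to true the first time it runs, false on every later call */
#define warn_once(fmt, ...)                         \
({                                                  \
    static bool __printed;                          \
    bool __first = !__printed;                      \
                                                    \
    if (__first) {                                  \
        __printed = true;                           \
        fprintf(stderr, fmt, ##__VA_ARGS__);        \
    }                                               \
    __first;                                        \
})

int main(void)
{
    for (int i = 0; i < 3; i++)
        warn_once("reserve pool low (iteration %d)\n", i);
    /* only the i == 0 message reaches stderr */
    return 0;
}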
- */ - uint8_t m_rt_checked; - uint8_t m_rt_sick; - struct xfs_ail *m_ail; /* fs active log item list */ - - struct xfs_sb m_sb; /* copy of fs superblock */ - spinlock_t m_sb_lock; /* sb counter lock */ - struct percpu_counter m_icount; /* allocated inodes counter */ - struct percpu_counter m_ifree; /* free inodes counter */ - struct percpu_counter m_fdblocks; /* free block counter */ - /* - * Count of data device blocks reserved for delayed allocations, - * including indlen blocks. Does not include allocated CoW staging - * extents or anything related to the rt device. - */ - struct percpu_counter m_delalloc_blks; - struct xfs_buf *m_sb_bp; /* buffer for superblock */ char *m_rtname; /* realtime device name */ char *m_logname; /* external log device name */ - int m_bsize; /* fs logical block size */ - xfs_agnumber_t m_agfrotor; /* last ag where space found */ - xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ - spinlock_t m_agirotor_lock;/* .. and lock protecting it */ - xfs_agnumber_t m_maxagi; /* highest inode alloc group */ - uint m_allocsize_log;/* min write size log bytes */ - uint m_allocsize_blocks; /* min write size blocks */ struct xfs_da_geometry *m_dir_geo; /* directory block geometry */ struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */ struct xlog *m_log; /* log specific stuff */ - struct xfs_ino_geometry m_ino_geo; /* inode geometry */ - int m_logbufs; /* number of log buffers */ - int m_logbsize; /* size of each log buffer */ - uint m_rsumlevels; /* rt summary levels */ - uint m_rsumsize; /* size of rt summary, bytes */ - /* - * Optional cache of rt summary level per bitmap block with the - * invariant that m_rsum_cache[bbno] <= the minimum i for which - * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip - * inode lock. - */ - uint8_t *m_rsum_cache; struct xfs_inode *m_rbmip; /* pointer to bitmap inode */ struct xfs_inode *m_rsumip; /* pointer to summary inode */ struct xfs_inode *m_rootip; /* pointer to root directory */ @@ -117,9 +81,26 @@ typedef struct xfs_mount { xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ + /* + * Optional cache of rt summary level per bitmap block with the + * invariant that m_rsum_cache[bbno] <= the minimum i for which + * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip + * inode lock. 
+ */ + uint8_t *m_rsum_cache; + struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ + struct workqueue_struct *m_buf_workqueue; + struct workqueue_struct *m_unwritten_workqueue; + struct workqueue_struct *m_cil_workqueue; + struct workqueue_struct *m_reclaim_workqueue; + struct workqueue_struct *m_eofblocks_workqueue; + struct workqueue_struct *m_sync_workqueue; + + int m_bsize; /* fs logical block size */ uint8_t m_blkbit_log; /* blocklog + NBBY */ uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ uint8_t m_agno_log; /* log #ag's */ + uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ uint m_blockmask; /* sb_blocksize-1 */ uint m_blockwsize; /* sb_blocksize in words */ uint m_blockwmask; /* blockwsize-1 */ @@ -138,47 +119,82 @@ typedef struct xfs_mount { xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */ uint m_alloc_set_aside; /* space we can't use */ uint m_ag_max_usable; /* max space per AG */ - struct radix_tree_root m_perag_tree; /* per-ag accounting info */ - spinlock_t m_perag_lock; /* lock for m_perag_tree */ - struct mutex m_growlock; /* growfs mutex */ + int m_dalign; /* stripe unit */ + int m_swidth; /* stripe width */ + xfs_agnumber_t m_maxagi; /* highest inode alloc group */ + uint m_allocsize_log;/* min write size log bytes */ + uint m_allocsize_blocks; /* min write size blocks */ + int m_logbufs; /* number of log buffers */ + int m_logbsize; /* size of each log buffer */ + uint m_rsumlevels; /* rt summary levels */ + uint m_rsumsize; /* size of rt summary, bytes */ int m_fixedfsid[2]; /* unchanged for life of FS */ - uint64_t m_flags; /* global mount flags */ - bool m_finobt_nores; /* no per-AG finobt resv. */ uint m_qflags; /* quota status flags */ + uint64_t m_flags; /* global mount flags */ + int64_t m_low_space[XFS_LOWSP_MAX]; + struct xfs_ino_geometry m_ino_geo; /* inode geometry */ struct xfs_trans_resv m_resv; /* precomputed res values */ + /* low free space thresholds */ + bool m_always_cow; + bool m_fail_unmount; + bool m_finobt_nores; /* no per-AG finobt resv. */ + bool m_update_sb; /* sb needs update in mount */ + + /* + * Bitsets of per-fs metadata that have been checked and/or are sick. + * Callers must hold m_sb_lock to access these two fields. + */ + uint8_t m_fs_checked; + uint8_t m_fs_sick; + /* + * Bitsets of rt metadata that have been checked and/or are sick. + * Callers must hold m_sb_lock to access this field. + */ + uint8_t m_rt_checked; + uint8_t m_rt_sick; + + /* + * End of read-mostly variables. Frequently written variables and locks + * should be placed below this comment from now on. The first variable + * here is marked as cacheline aligned so they it is separated from + * the read-mostly variables. + */ + + spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */ + struct percpu_counter m_icount; /* allocated inodes counter */ + struct percpu_counter m_ifree; /* free inodes counter */ + struct percpu_counter m_fdblocks; /* free block counter */ + /* + * Count of data device blocks reserved for delayed allocations, + * including indlen blocks. Does not include allocated CoW staging + * extents or anything related to the rt device. 
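The reshuffled xfs_mount groups fields that are effectively constant after mount ahead of the frequently written counters and locks, and the first hot field is cacheline aligned so the two groups never share a line. The layout idea, reduced to a standalone struct with illustrative fields only:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64

struct mount_like {
    /* read-mostly: set at mount time and rarely written afterwards */
    unsigned int block_size;
    unsigned int ag_count;
    unsigned long flags;

    /*
     * Frequently written state starts on its own cacheline so that updates
     * here never bounce the read-mostly line above between CPUs.
     */
    alignas(CACHELINE) long free_blocks;
    long free_inodes;
};

int main(void)
{
    printf("hot fields start at offset %zu of a %zu-byte struct\n",
           offsetof(struct mount_like, free_blocks),
           sizeof(struct mount_like));
    return 0;
}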
+ */ + struct percpu_counter m_delalloc_blks; + + struct radix_tree_root m_perag_tree; /* per-ag accounting info */ + spinlock_t m_perag_lock; /* lock for m_perag_tree */ uint64_t m_resblks; /* total reserved blocks */ uint64_t m_resblks_avail;/* available reserved blocks */ uint64_t m_resblks_save; /* reserved blks @ remount,ro */ - int m_dalign; /* stripe unit */ - int m_swidth; /* stripe width */ - uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ - atomic_t m_active_trans; /* number trans frozen */ - struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ struct delayed_work m_reclaim_work; /* background inode reclaim */ struct delayed_work m_eofblocks_work; /* background eof blocks trimming */ struct delayed_work m_cowblocks_work; /* background cow blocks trimming */ - bool m_update_sb; /* sb needs update in mount */ - int64_t m_low_space[XFS_LOWSP_MAX]; - /* low free space thresholds */ struct xfs_kobj m_kobj; struct xfs_kobj m_error_kobj; struct xfs_kobj m_error_meta_kobj; struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX]; struct xstats m_stats; /* per-fs stats */ + xfs_agnumber_t m_agfrotor; /* last ag where space found */ + xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ + spinlock_t m_agirotor_lock;/* .. and lock protecting it */ /* * Workqueue item so that we can coalesce multiple inode flush attempts * into a single flush. */ struct work_struct m_flush_inodes_work; - struct workqueue_struct *m_buf_workqueue; - struct workqueue_struct *m_unwritten_workqueue; - struct workqueue_struct *m_cil_workqueue; - struct workqueue_struct *m_reclaim_workqueue; - struct workqueue_struct *m_eofblocks_workqueue; - struct workqueue_struct *m_sync_workqueue; /* * Generation of the filesysyem layout. This is incremented by each @@ -190,9 +206,8 @@ typedef struct xfs_mount { * to various other kinds of pain inflicted on the pNFS server. */ uint32_t m_generation; + struct mutex m_growlock; /* growfs mutex */ - bool m_always_cow; - bool m_fail_unmount; #ifdef DEBUG /* * Frequency with which errors are injected. Replaces xfs_etest; the @@ -237,8 +252,8 @@ typedef struct xfs_mount { #define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams allocator */ #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ - -#define XFS_MOUNT_DAX (1ULL << 62) /* TEST ONLY! 
*/ +#define XFS_MOUNT_DAX_ALWAYS (1ULL << 26) +#define XFS_MOUNT_DAX_NEVER (1ULL << 27) /* * Max and min values for mount-option defined I/O @@ -259,8 +274,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, #define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */ #define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */ #define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */ -#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */ -#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */ /* * Flags for xfs_mountfs @@ -394,8 +407,6 @@ extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount, xfs_agnumber_t *maxagi); extern void xfs_unmountfs(xfs_mount_t *); -extern int xfs_mod_icount(struct xfs_mount *mp, int64_t delta); -extern int xfs_mod_ifree(struct xfs_mount *mp, int64_t delta); extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved); extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta); diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index bb3008d390aa..b101feb2aab4 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -58,9 +58,8 @@ xfs_fs_get_uuid( { struct xfs_mount *mp = XFS_M(sb); - printk_once(KERN_NOTICE -"XFS (%s): using experimental pNFS feature, use at your own risk!\n", - mp->m_super->s_id); + xfs_notice_once(mp, +"Using experimental pNFS feature, use at your own risk!"); if (*len < sizeof(uuid_t)) return -EINVAL; diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index c225691fad15..d6cd83317344 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -558,7 +558,7 @@ xfs_qm_set_defquota( return; ddqp = &dqp->q_core; - defq = xfs_get_defquota(dqp, qinf); + defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp)); /* * Timers and warnings have been already set, let's just set the @@ -577,19 +577,22 @@ xfs_qm_set_defquota( static void xfs_qm_init_timelimits( struct xfs_mount *mp, - struct xfs_quotainfo *qinf) + uint type) { + struct xfs_quotainfo *qinf = mp->m_quotainfo; + struct xfs_def_quota *defq; struct xfs_disk_dquot *ddqp; struct xfs_dquot *dqp; - uint type; int error; - qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; - qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; - qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; - qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; - qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; - qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; + defq = xfs_get_defquota(qinf, type); + + defq->btimelimit = XFS_QM_BTIMELIMIT; + defq->itimelimit = XFS_QM_ITIMELIMIT; + defq->rtbtimelimit = XFS_QM_RTBTIMELIMIT; + defq->bwarnlimit = XFS_QM_BWARNLIMIT; + defq->iwarnlimit = XFS_QM_IWARNLIMIT; + defq->rtbwarnlimit = XFS_QM_RTBWARNLIMIT; /* * We try to get the limits from the superuser's limits fields. @@ -597,39 +600,30 @@ xfs_qm_init_timelimits( * * Since we may not have done a quotacheck by this point, just read * the dquot without attaching it to any hashtables or lists. - * - * Timers and warnings are globally set by the first timer found in - * user/group/proj quota types, otherwise a default value is used. - * This should be split into different fields per quota type. 
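Instead of one global set of timer and warning defaults taken from whichever quota type happened to be initialised first, each quota type now carries its own xfs_def_quota and the init path runs once per type. Sketched in isolation, with a made-up field set and constants:

#include <stdint.h>
#include <stdio.h>

enum qtype { Q_USER, Q_GROUP, Q_PROJ, Q_TYPES };

struct def_quota {
    int64_t block_timelimit;    /* grace period for block overruns */
    int64_t inode_timelimit;    /* grace period for inode overruns */
};

static struct def_quota defaults[Q_TYPES];

static void init_timelimits(enum qtype type, int64_t btime, int64_t itime)
{
    struct def_quota *defq = &defaults[type];

    /* built-in defaults; an on-disk root dquot may override them */
    defq->block_timelimit = btime ? btime : 7 * 24 * 3600;
    defq->inode_timelimit = itime ? itime : 7 * 24 * 3600;
}

int main(void)
{
    init_timelimits(Q_USER, 0, 0);
    init_timelimits(Q_GROUP, 3 * 24 * 3600, 0);
    init_timelimits(Q_PROJ, 0, 0);
    printf("group block grace: %lld s\n",
           (long long)defaults[Q_GROUP].block_timelimit);
    return 0;
}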
*/ - if (XFS_IS_UQUOTA_RUNNING(mp)) - type = XFS_DQ_USER; - else if (XFS_IS_GQUOTA_RUNNING(mp)) - type = XFS_DQ_GROUP; - else - type = XFS_DQ_PROJ; error = xfs_qm_dqget_uncached(mp, 0, type, &dqp); if (error) return; ddqp = &dqp->q_core; + /* * The warnings and timers set the grace period given to * a user or group before he or she can not perform any * more writing. If it is zero, a default is used. */ if (ddqp->d_btimer) - qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer); + defq->btimelimit = be32_to_cpu(ddqp->d_btimer); if (ddqp->d_itimer) - qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer); + defq->itimelimit = be32_to_cpu(ddqp->d_itimer); if (ddqp->d_rtbtimer) - qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer); + defq->rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer); if (ddqp->d_bwarns) - qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns); + defq->bwarnlimit = be16_to_cpu(ddqp->d_bwarns); if (ddqp->d_iwarns) - qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns); + defq->iwarnlimit = be16_to_cpu(ddqp->d_iwarns); if (ddqp->d_rtbwarns) - qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns); + defq->rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns); xfs_qm_dqdestroy(dqp); } @@ -675,7 +669,9 @@ xfs_qm_init_quotainfo( mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); - xfs_qm_init_timelimits(mp, qinf); + xfs_qm_init_timelimits(mp, XFS_DQ_USER); + xfs_qm_init_timelimits(mp, XFS_DQ_GROUP); + xfs_qm_init_timelimits(mp, XFS_DQ_PROJ); if (XFS_IS_UQUOTA_RUNNING(mp)) xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf); @@ -780,7 +776,8 @@ xfs_qm_qino_alloc( } error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create, - XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp); + need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0, + 0, 0, &tp); if (error) return error; @@ -1116,7 +1113,7 @@ xfs_qm_quotacheck_dqadjust( */ if (dqp->q_core.d_id) { xfs_qm_adjust_dqlimits(mp, dqp); - xfs_qm_adjust_dqtimers(mp, &dqp->q_core); + xfs_qm_adjust_dqtimers(mp, dqp); } dqp->dq_flags |= XFS_DQ_DIRTY; @@ -1730,8 +1727,7 @@ xfs_qm_vop_dqalloc( pq = xfs_qm_dqhold(ip->i_pdquot); } } - if (uq) - trace_xfs_dquot_dqalloc(ip); + trace_xfs_dquot_dqalloc(ip); xfs_iunlock(ip, lockflags); if (O_udqpp) @@ -1808,7 +1804,7 @@ xfs_qm_vop_chown_reserve( { struct xfs_mount *mp = ip->i_mount; uint64_t delblks; - unsigned int blkflags, prjflags = 0; + unsigned int blkflags; struct xfs_dquot *udq_unres = NULL; struct xfs_dquot *gdq_unres = NULL; struct xfs_dquot *pdq_unres = NULL; @@ -1849,7 +1845,6 @@ xfs_qm_vop_chown_reserve( if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) { - prjflags = XFS_QMOPT_ENOSPC; pdq_delblks = pdqp; if (delblks) { ASSERT(ip->i_pdquot); @@ -1859,8 +1854,7 @@ xfs_qm_vop_chown_reserve( error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, udq_delblks, gdq_delblks, pdq_delblks, - ip->i_d.di_nblocks, 1, - flags | blkflags | prjflags); + ip->i_d.di_nblocks, 1, flags | blkflags); if (error) return error; @@ -1878,8 +1872,7 @@ xfs_qm_vop_chown_reserve( ASSERT(udq_unres || gdq_unres || pdq_unres); error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, udq_delblks, gdq_delblks, pdq_delblks, - (xfs_qcnt_t)delblks, 0, - flags | blkflags | prjflags); + (xfs_qcnt_t)delblks, 0, flags | blkflags); if (error) return error; xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, @@ -1932,7 +1925,6 @@ xfs_qm_vop_create_dqattach( return; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); if (udqp && XFS_IS_UQUOTA_ON(mp)) { ASSERT(ip->i_udquot == NULL); diff --git 
a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 4e57edca8bce..7b0e771fcbce 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -41,13 +41,20 @@ extern struct kmem_zone *xfs_qm_dqtrxzone; */ #define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 +/* Defaults for each quota type: time limits, warn limits, usage limits */ struct xfs_def_quota { - xfs_qcnt_t bhardlimit; /* default data blk hard limit */ - xfs_qcnt_t bsoftlimit; /* default data blk soft limit */ - xfs_qcnt_t ihardlimit; /* default inode count hard limit */ - xfs_qcnt_t isoftlimit; /* default inode count soft limit */ - xfs_qcnt_t rtbhardlimit; /* default realtime blk hard limit */ - xfs_qcnt_t rtbsoftlimit; /* default realtime blk soft limit */ + time64_t btimelimit; /* limit for blks timer */ + time64_t itimelimit; /* limit for inodes timer */ + time64_t rtbtimelimit; /* limit for rt blks timer */ + xfs_qwarncnt_t bwarnlimit; /* limit for blks warnings */ + xfs_qwarncnt_t iwarnlimit; /* limit for inodes warnings */ + xfs_qwarncnt_t rtbwarnlimit; /* limit for rt blks warnings */ + xfs_qcnt_t bhardlimit; /* default data blk hard limit */ + xfs_qcnt_t bsoftlimit; /* default data blk soft limit */ + xfs_qcnt_t ihardlimit; /* default inode count hard limit */ + xfs_qcnt_t isoftlimit; /* default inode count soft limit */ + xfs_qcnt_t rtbhardlimit; /* default realtime blk hard limit */ + xfs_qcnt_t rtbsoftlimit; /* default realtime blk soft limit */ }; /* @@ -55,28 +62,22 @@ struct xfs_def_quota { * The mount structure keeps a pointer to this. */ struct xfs_quotainfo { - struct radix_tree_root qi_uquota_tree; - struct radix_tree_root qi_gquota_tree; - struct radix_tree_root qi_pquota_tree; - struct mutex qi_tree_lock; + struct radix_tree_root qi_uquota_tree; + struct radix_tree_root qi_gquota_tree; + struct radix_tree_root qi_pquota_tree; + struct mutex qi_tree_lock; struct xfs_inode *qi_uquotaip; /* user quota inode */ struct xfs_inode *qi_gquotaip; /* group quota inode */ struct xfs_inode *qi_pquotaip; /* project quota inode */ - struct list_lru qi_lru; - int qi_dquots; - time64_t qi_btimelimit; /* limit for blks timer */ - time64_t qi_itimelimit; /* limit for inodes timer */ - time64_t qi_rtbtimelimit;/* limit for rt blks timer */ - xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */ - xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */ - xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */ - struct mutex qi_quotaofflock;/* to serialize quotaoff */ - xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ - uint qi_dqperchunk; /* # ondisk dqs in above chunk */ + struct list_lru qi_lru; + int qi_dquots; + struct mutex qi_quotaofflock;/* to serialize quotaoff */ + xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ + uint qi_dqperchunk; /* # ondisk dq in above chunk */ struct xfs_def_quota qi_usr_default; struct xfs_def_quota qi_grp_default; struct xfs_def_quota qi_prj_default; - struct shrinker qi_shrinker; + struct shrinker qi_shrinker; }; static inline struct radix_tree_root * @@ -113,6 +114,17 @@ xfs_quota_inode(xfs_mount_t *mp, uint dq_flags) return NULL; } +static inline int +xfs_dquot_type(struct xfs_dquot *dqp) +{ + if (XFS_QM_ISUDQ(dqp)) + return XFS_DQ_USER; + if (XFS_QM_ISGDQ(dqp)) + return XFS_DQ_GROUP; + ASSERT(XFS_QM_ISPDQ(dqp)); + return XFS_DQ_PROJ; +} + extern void xfs_trans_mod_dquot(struct xfs_trans *tp, struct xfs_dquot *dqp, uint field, int64_t delta); extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *); @@ -164,19 +176,19 @@ extern int xfs_qm_scall_quotaon(struct 
xfs_mount *, uint); extern int xfs_qm_scall_quotaoff(struct xfs_mount *, uint); static inline struct xfs_def_quota * -xfs_get_defquota(struct xfs_dquot *dqp, struct xfs_quotainfo *qi) +xfs_get_defquota(struct xfs_quotainfo *qi, int type) { - struct xfs_def_quota *defq; - - if (XFS_QM_ISUDQ(dqp)) - defq = &qi->qi_usr_default; - else if (XFS_QM_ISGDQ(dqp)) - defq = &qi->qi_grp_default; - else { - ASSERT(XFS_QM_ISPDQ(dqp)); - defq = &qi->qi_prj_default; + switch (type) { + case XFS_DQ_USER: + return &qi->qi_usr_default; + case XFS_DQ_GROUP: + return &qi->qi_grp_default; + case XFS_DQ_PROJ: + return &qi->qi_prj_default; + default: + ASSERT(0); + return NULL; } - return defq; } #endif /* __XFS_QM_H__ */ diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 5d5ac65aa1cc..7effd7a28136 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -302,7 +302,7 @@ xfs_qm_scall_trunc_qfile( goto out_unlock; } - ASSERT(ip->i_d.di_nextents == 0); + ASSERT(ip->i_df.if_nextents == 0); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); error = xfs_trans_commit(tp); @@ -357,11 +357,11 @@ xfs_qm_scall_quotaon( int error; uint qf; - flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* - * Switching on quota accounting must be done at mount time. + * Switching on quota accounting must be done at mount time, + * only consider quota enforcement stuff here. */ - flags &= ~(XFS_ALL_QUOTA_ACCT); + flags &= XFS_ALL_QUOTA_ENFD; if (flags == 0) { xfs_debug(mp, "%s: zero flags, m_qflags=%x", @@ -479,7 +479,7 @@ xfs_qm_scall_setqlim( goto out_unlock; } - defq = xfs_get_defquota(dqp, q); + defq = xfs_get_defquota(q, xfs_dquot_type(dqp)); xfs_dqunlock(dqp); error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp); @@ -555,32 +555,40 @@ xfs_qm_scall_setqlim( ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); if (id == 0) { - /* - * Timelimits for the super user set the relative time - * the other users can be over quota for this file system. - * If it is zero a default is used. Ditto for the default - * soft and hard limit values (already done, above), and - * for warnings. - */ - if (newlim->d_fieldmask & QC_SPC_TIMER) { - q->qi_btimelimit = newlim->d_spc_timer; - ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); - } - if (newlim->d_fieldmask & QC_INO_TIMER) { - q->qi_itimelimit = newlim->d_ino_timer; - ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); - } - if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { - q->qi_rtbtimelimit = newlim->d_rt_spc_timer; - ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); - } if (newlim->d_fieldmask & QC_SPC_WARNS) - q->qi_bwarnlimit = newlim->d_spc_warns; + defq->bwarnlimit = newlim->d_spc_warns; if (newlim->d_fieldmask & QC_INO_WARNS) - q->qi_iwarnlimit = newlim->d_ino_warns; + defq->iwarnlimit = newlim->d_ino_warns; if (newlim->d_fieldmask & QC_RT_SPC_WARNS) - q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; - } else { + defq->rtbwarnlimit = newlim->d_rt_spc_warns; + } + + /* + * Timelimits for the super user set the relative time the other users + * can be over quota for this file system. If it is zero a default is + * used. Ditto for the default soft and hard limit values (already + * done, above), and for warnings. + * + * For other IDs, userspace can bump out the grace period if over + * the soft limit. 
+ */ + if (newlim->d_fieldmask & QC_SPC_TIMER) + ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); + if (newlim->d_fieldmask & QC_INO_TIMER) + ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); + if (newlim->d_fieldmask & QC_RT_SPC_TIMER) + ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); + + if (id == 0) { + if (newlim->d_fieldmask & QC_SPC_TIMER) + defq->btimelimit = newlim->d_spc_timer; + if (newlim->d_fieldmask & QC_INO_TIMER) + defq->itimelimit = newlim->d_ino_timer; + if (newlim->d_fieldmask & QC_RT_SPC_TIMER) + defq->rtbtimelimit = newlim->d_rt_spc_timer; + } + + if (id != 0) { /* * If the user is now over quota, start the timelimit. * The user will not be 'warned'. @@ -588,7 +596,7 @@ xfs_qm_scall_setqlim( * is on or off. We don't really want to bother with iterating * over all ondisk dquots and turning the timers on/off. */ - xfs_qm_adjust_dqtimers(mp, ddq); + xfs_qm_adjust_dqtimers(mp, dqp); } dqp->dq_flags |= XFS_DQ_DIRTY; xfs_trans_log_dquot(tp, dqp); @@ -729,9 +737,10 @@ xfs_qm_scall_getquota_next( STATIC int xfs_dqrele_inode( struct xfs_inode *ip, - int flags, void *args) { + uint *flags = args; + /* skip quota inodes */ if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || ip == ip->i_mount->m_quotainfo->qi_gquotaip || @@ -743,15 +752,15 @@ xfs_dqrele_inode( } xfs_ilock(ip, XFS_ILOCK_EXCL); - if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { + if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } - if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) { + if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) { xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } - if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) { + if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) { xfs_qm_dqrele(ip->i_pdquot); ip->i_pdquot = NULL; } @@ -768,10 +777,10 @@ xfs_dqrele_inode( */ void xfs_qm_dqrele_all_inodes( - struct xfs_mount *mp, - uint flags) + struct xfs_mount *mp, + uint flags) { ASSERT(mp->m_quotainfo); - xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL, - XFS_AGITER_INEW_WAIT); + xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode, + &flags, XFS_ICI_NO_TAG); } diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 38669e827206..bf809b77a316 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c @@ -21,10 +21,10 @@ xfs_qm_fill_state( struct qc_type_state *tstate, struct xfs_mount *mp, struct xfs_inode *ip, - xfs_ino_t ino) + xfs_ino_t ino, + struct xfs_def_quota *defq) { - struct xfs_quotainfo *q = mp->m_quotainfo; - bool tempqip = false; + bool tempqip = false; tstate->ino = ino; if (!ip && ino == NULLFSINO) @@ -36,13 +36,13 @@ xfs_qm_fill_state( } tstate->flags |= QCI_SYSFILE; tstate->blocks = ip->i_d.di_nblocks; - tstate->nextents = ip->i_d.di_nextents; - tstate->spc_timelimit = (u32)q->qi_btimelimit; - tstate->ino_timelimit = (u32)q->qi_itimelimit; - tstate->rt_spc_timelimit = (u32)q->qi_rtbtimelimit; - tstate->spc_warnlimit = q->qi_bwarnlimit; - tstate->ino_warnlimit = q->qi_iwarnlimit; - tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit; + tstate->nextents = ip->i_df.if_nextents; + tstate->spc_timelimit = (u32)defq->btimelimit; + tstate->ino_timelimit = (u32)defq->itimelimit; + tstate->rt_spc_timelimit = (u32)defq->rtbtimelimit; + tstate->spc_warnlimit = defq->bwarnlimit; + tstate->ino_warnlimit = defq->iwarnlimit; + tstate->rt_spc_warnlimit = defq->rtbwarnlimit; if (tempqip) xfs_irele(ip); } @@ -77,11 +77,11 @@ xfs_fs_get_quota_state( state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED; 
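The dqrele walker below now receives its flags through the generic void *args parameter of the inode walk instead of a dedicated int argument, so every walker callback shares a single signature. The pattern, stripped down to an invented walker and item type:

#include <stdio.h>

struct node {
    struct node *next;
    unsigned int value;
};

/* generic walk: one callback signature, per-caller context passed via 'args' */
static void walk(struct node *head,
                 void (*fn)(struct node *, void *), void *args)
{
    for (; head != NULL; head = head->next)
        fn(head, args);
}

static void count_matching(struct node *n, void *args)
{
    unsigned int *flags = args;     /* unpack the caller-specific context */

    if (n->value & *flags)
        printf("node %u matches\n", n->value);
}

int main(void)
{
    struct node b = { NULL, 4 }, a = { &b, 3 };
    unsigned int flags = 0x1;

    walk(&a, count_matching, &flags);
    return 0;
}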
xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip, - mp->m_sb.sb_uquotino); + mp->m_sb.sb_uquotino, &q->qi_usr_default); xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip, - mp->m_sb.sb_gquotino); + mp->m_sb.sb_gquotino, &q->qi_grp_default); xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip, - mp->m_sb.sb_pquotino); + mp->m_sb.sb_pquotino, &q->qi_prj_default); return 0; } @@ -109,8 +109,8 @@ xfs_fs_set_info( int type, struct qc_info *info) { - struct xfs_mount *mp = XFS_M(sb); - struct qc_dqblk newlim; + struct xfs_mount *mp = XFS_M(sb); + struct qc_dqblk newlim; if (sb_rdonly(sb)) return -EROFS; diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 8eeed73928cd..c81639891e29 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -18,16 +18,20 @@ #include "xfs_log.h" #include "xfs_refcount.h" #include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" kmem_zone_t *xfs_cui_zone; kmem_zone_t *xfs_cud_zone; +static const struct xfs_item_ops xfs_cui_item_ops; + static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip) { return container_of(lip, struct xfs_cui_log_item, cui_item); } -void +STATIC void xfs_cui_item_free( struct xfs_cui_log_item *cuip) { @@ -44,13 +48,13 @@ xfs_cui_item_free( * committed vs unpin operations in bulk insert operations. Hence the reference * count to ensure only the last caller frees the CUI. */ -void +STATIC void xfs_cui_release( struct xfs_cui_log_item *cuip) { ASSERT(atomic_read(&cuip->cui_refcount) > 0); if (atomic_dec_and_test(&cuip->cui_refcount)) { - xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR); xfs_cui_item_free(cuip); } } @@ -123,17 +127,10 @@ xfs_cui_item_release( xfs_cui_release(CUI_ITEM(lip)); } -static const struct xfs_item_ops xfs_cui_item_ops = { - .iop_size = xfs_cui_item_size, - .iop_format = xfs_cui_item_format, - .iop_unpin = xfs_cui_item_unpin, - .iop_release = xfs_cui_item_release, -}; - /* * Allocate and initialize an cui item with the given number of extents. */ -struct xfs_cui_log_item * +STATIC struct xfs_cui_log_item * xfs_cui_init( struct xfs_mount *mp, uint nextents) @@ -284,27 +281,6 @@ xfs_refcount_update_diff_items( XFS_FSB_TO_AGNO(mp, rb->ri_startblock); } -/* Get an CUI. */ -STATIC void * -xfs_refcount_update_create_intent( - struct xfs_trans *tp, - unsigned int count) -{ - struct xfs_cui_log_item *cuip; - - ASSERT(tp != NULL); - ASSERT(count > 0); - - cuip = xfs_cui_init(tp->t_mountp, count); - ASSERT(cuip != NULL); - - /* - * Get a log_item_desc to point at the new item. - */ - xfs_trans_add_item(tp, &cuip->cui_item); - return cuip; -} - /* Set the phys extent flags for this reverse mapping. 
*/ static void xfs_trans_set_refcount_flags( @@ -328,16 +304,12 @@ xfs_trans_set_refcount_flags( STATIC void xfs_refcount_update_log_item( struct xfs_trans *tp, - void *intent, - struct list_head *item) + struct xfs_cui_log_item *cuip, + struct xfs_refcount_intent *refc) { - struct xfs_cui_log_item *cuip = intent; - struct xfs_refcount_intent *refc; uint next_extent; struct xfs_phys_extent *ext; - refc = container_of(item, struct xfs_refcount_intent, ri_list); - tp->t_flags |= XFS_TRANS_DIRTY; set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags); @@ -354,23 +326,44 @@ xfs_refcount_update_log_item( xfs_trans_set_refcount_flags(ext, refc->ri_type); } +static struct xfs_log_item * +xfs_refcount_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count); + struct xfs_refcount_intent *refc; + + ASSERT(count > 0); + + xfs_trans_add_item(tp, &cuip->cui_item); + if (sort) + list_sort(mp, items, xfs_refcount_update_diff_items); + list_for_each_entry(refc, items, ri_list) + xfs_refcount_update_log_item(tp, cuip, refc); + return &cuip->cui_item; +} + /* Get an CUD so we can process all the deferred refcount updates. */ -STATIC void * +static struct xfs_log_item * xfs_refcount_update_create_done( struct xfs_trans *tp, - void *intent, + struct xfs_log_item *intent, unsigned int count) { - return xfs_trans_get_cud(tp, intent); + return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item; } /* Process a deferred refcount update. */ STATIC int xfs_refcount_update_finish_item( struct xfs_trans *tp, + struct xfs_log_item *done, struct list_head *item, - void *done_item, - void **state) + struct xfs_btree_cur **state) { struct xfs_refcount_intent *refc; xfs_fsblock_t new_fsb; @@ -378,12 +371,10 @@ xfs_refcount_update_finish_item( int error; refc = container_of(item, struct xfs_refcount_intent, ri_list); - error = xfs_trans_log_finish_refcount_update(tp, done_item, - refc->ri_type, - refc->ri_startblock, - refc->ri_blockcount, - &new_fsb, &new_aglen, - (struct xfs_btree_cur **)state); + error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), + refc->ri_type, refc->ri_startblock, refc->ri_blockcount, + &new_fsb, &new_aglen, state); + /* Did we run out of reservation? Requeue what we didn't finish. */ if (!error && new_aglen > 0) { ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE || @@ -396,24 +387,12 @@ xfs_refcount_update_finish_item( return error; } -/* Clean up after processing deferred refcounts. */ -STATIC void -xfs_refcount_update_finish_cleanup( - struct xfs_trans *tp, - void *state, - int error) -{ - struct xfs_btree_cur *rcur = state; - - xfs_refcount_finish_one_cleanup(tp, rcur, error); -} - /* Abort all pending CUIs. */ STATIC void xfs_refcount_update_abort_intent( - void *intent) + struct xfs_log_item *intent) { - xfs_cui_release(intent); + xfs_cui_release(CUI_ITEM(intent)); } /* Cancel a deferred refcount update. 
*/ @@ -429,13 +408,11 @@ xfs_refcount_update_cancel_item( const struct xfs_defer_op_type xfs_refcount_update_defer_type = { .max_items = XFS_CUI_MAX_FAST_EXTENTS, - .diff_items = xfs_refcount_update_diff_items, .create_intent = xfs_refcount_update_create_intent, .abort_intent = xfs_refcount_update_abort_intent, - .log_item = xfs_refcount_update_log_item, .create_done = xfs_refcount_update_create_done, .finish_item = xfs_refcount_update_finish_item, - .finish_cleanup = xfs_refcount_update_finish_cleanup, + .finish_cleanup = xfs_refcount_finish_one_cleanup, .cancel_item = xfs_refcount_update_cancel_item, }; @@ -443,28 +420,27 @@ const struct xfs_defer_op_type xfs_refcount_update_defer_type = { * Process a refcount update intent item that was recovered from the log. * We need to update the refcountbt. */ -int -xfs_cui_recover( - struct xfs_trans *parent_tp, - struct xfs_cui_log_item *cuip) +STATIC int +xfs_cui_item_recover( + struct xfs_log_item *lip, + struct xfs_trans *parent_tp) { - int i; - int error = 0; - unsigned int refc_type; + struct xfs_bmbt_irec irec; + struct xfs_cui_log_item *cuip = CUI_ITEM(lip); struct xfs_phys_extent *refc; - xfs_fsblock_t startblock_fsb; - bool op_ok; struct xfs_cud_log_item *cudp; struct xfs_trans *tp; struct xfs_btree_cur *rcur = NULL; - enum xfs_refcount_intent_type type; + struct xfs_mount *mp = parent_tp->t_mountp; + xfs_fsblock_t startblock_fsb; xfs_fsblock_t new_fsb; xfs_extlen_t new_len; - struct xfs_bmbt_irec irec; + unsigned int refc_type; + bool op_ok; bool requeue_only = false; - struct xfs_mount *mp = parent_tp->t_mountp; - - ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags)); + enum xfs_refcount_intent_type type; + int i; + int error = 0; /* * First check the validity of the extents described by the @@ -495,7 +471,6 @@ xfs_cui_recover( * This will pull the CUI from the AIL and * free the memory associated with it. */ - set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); xfs_cui_release(cuip); return -EFSCORRUPTED; } @@ -579,7 +554,6 @@ xfs_cui_recover( } xfs_refcount_finish_one_cleanup(tp, rcur, error); - set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); xfs_defer_move(parent_tp, tp); error = xfs_trans_commit(tp); return error; @@ -590,3 +564,117 @@ abort_error: xfs_trans_cancel(tp); return error; } + +STATIC bool +xfs_cui_item_match( + struct xfs_log_item *lip, + uint64_t intent_id) +{ + return CUI_ITEM(lip)->cui_format.cui_id == intent_id; +} + +static const struct xfs_item_ops xfs_cui_item_ops = { + .iop_size = xfs_cui_item_size, + .iop_format = xfs_cui_item_format, + .iop_unpin = xfs_cui_item_unpin, + .iop_release = xfs_cui_item_release, + .iop_recover = xfs_cui_item_recover, + .iop_match = xfs_cui_item_match, +}; + +/* + * Copy an CUI format buffer from the given buf, and into the destination + * CUI format structure. The CUI/CUD items were designed not to need any + * special alignment handling. + */ +static int +xfs_cui_copy_format( + struct xfs_log_iovec *buf, + struct xfs_cui_log_format *dst_cui_fmt) +{ + struct xfs_cui_log_format *src_cui_fmt; + uint len; + + src_cui_fmt = buf->i_addr; + len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents); + + if (buf->i_len == len) { + memcpy(dst_cui_fmt, src_cui_fmt, len); + return 0; + } + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); + return -EFSCORRUPTED; +} + +/* + * This routine is called to create an in-core extent refcount update + * item from the cui format structure which was logged on disk. 
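xfs_cui_copy_format above copies the recovered buffer only when its length matches what the extent count implies, and reports corruption otherwise; nothing is copied on faith. Roughly, in standalone form with a hypothetical record layout:

#include <stdint.h>
#include <string.h>

struct rec {                /* illustrative variable-length log record */
    uint32_t nextents;
    uint32_t ext[];         /* one slot per extent */
};

#define REC_SIZEOF(n)   (sizeof(struct rec) + (n) * sizeof(uint32_t))

/*
 * Returns 0 on success, -1 if the buffer disagrees with its own extent
 * count. dst must have room for REC_SIZEOF(src->nextents) bytes.
 */
static int copy_rec(const void *buf, size_t buf_len, struct rec *dst)
{
    const struct rec *src = buf;

    if (buf_len < sizeof(struct rec))
        return -1;          /* too short to even hold the header */
    if (buf_len != REC_SIZEOF(src->nextents))
        return -1;          /* length and extent count disagree: corrupt */
    memcpy(dst, src, buf_len);
    return 0;
}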
+ * It allocates an in-core cui, copies the extents from the format + * structure into it, and adds the cui to the AIL with the given + * LSN. + */ +STATIC int +xlog_recover_cui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + int error; + struct xfs_mount *mp = log->l_mp; + struct xfs_cui_log_item *cuip; + struct xfs_cui_log_format *cui_formatp; + + cui_formatp = item->ri_buf[0].i_addr; + + cuip = xfs_cui_init(mp, cui_formatp->cui_nextents); + error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format); + if (error) { + xfs_cui_item_free(cuip); + return error; + } + atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents); + /* + * Insert the intent into the AIL directly and drop one reference so + * that finishing or canceling the work will drop the other. + */ + xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn); + xfs_cui_release(cuip); + return 0; +} + +const struct xlog_recover_item_ops xlog_cui_item_ops = { + .item_type = XFS_LI_CUI, + .commit_pass2 = xlog_recover_cui_commit_pass2, +}; + +/* + * This routine is called when an CUD format structure is found in a committed + * transaction in the log. Its purpose is to cancel the corresponding CUI if it + * was still in the log. To do this it searches the AIL for the CUI with an id + * equal to that in the CUD format structure. If we find it we drop the CUD + * reference, which removes the CUI from the AIL and frees it. + */ +STATIC int +xlog_recover_cud_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_cud_log_format *cud_formatp; + + cud_formatp = item->ri_buf[0].i_addr; + if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) { + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); + return -EFSCORRUPTED; + } + + xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id); + return 0; +} + +const struct xlog_recover_item_ops xlog_cud_item_ops = { + .item_type = XFS_LI_CUD, + .commit_pass2 = xlog_recover_cud_commit_pass2, +}; diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h index e47530f30489..f4f2e836540b 100644 --- a/fs/xfs/xfs_refcount_item.h +++ b/fs/xfs/xfs_refcount_item.h @@ -33,11 +33,6 @@ struct kmem_zone; #define XFS_CUI_MAX_FAST_EXTENTS 16 /* - * Define CUI flag bits. Manipulated by set/clear/test_bit operators. - */ -#define XFS_CUI_RECOVERED 1 - -/* * This is the "refcount update intent" log item. It is used to log * the fact that some reverse mappings need to change. 
It is used in * conjunction with the "refcount update done" log item described @@ -51,7 +46,6 @@ struct xfs_cui_log_item { struct xfs_log_item cui_item; atomic_t cui_refcount; atomic_t cui_next_extent; - unsigned long cui_flags; /* misc flags */ struct xfs_cui_log_format cui_format; }; @@ -77,9 +71,4 @@ struct xfs_cud_log_item { extern struct kmem_zone *xfs_cui_zone; extern struct kmem_zone *xfs_cud_zone; -struct xfs_cui_log_item *xfs_cui_init(struct xfs_mount *, uint); -void xfs_cui_item_free(struct xfs_cui_log_item *); -void xfs_cui_release(struct xfs_cui_log_item *); -int xfs_cui_recover(struct xfs_trans *parent_tp, struct xfs_cui_log_item *cuip); - #endif /* __XFS_REFCOUNT_ITEM_H__ */ diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index 4911b68f95dd..a86599db20a6 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -18,16 +18,20 @@ #include "xfs_log.h" #include "xfs_rmap.h" #include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" kmem_zone_t *xfs_rui_zone; kmem_zone_t *xfs_rud_zone; +static const struct xfs_item_ops xfs_rui_item_ops; + static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip) { return container_of(lip, struct xfs_rui_log_item, rui_item); } -void +STATIC void xfs_rui_item_free( struct xfs_rui_log_item *ruip) { @@ -44,13 +48,13 @@ xfs_rui_item_free( * committed vs unpin operations in bulk insert operations. Hence the reference * count to ensure only the last caller frees the RUI. */ -void +STATIC void xfs_rui_release( struct xfs_rui_log_item *ruip) { ASSERT(atomic_read(&ruip->rui_refcount) > 0); if (atomic_dec_and_test(&ruip->rui_refcount)) { - xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR); + xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR); xfs_rui_item_free(ruip); } } @@ -122,17 +126,10 @@ xfs_rui_item_release( xfs_rui_release(RUI_ITEM(lip)); } -static const struct xfs_item_ops xfs_rui_item_ops = { - .iop_size = xfs_rui_item_size, - .iop_format = xfs_rui_item_format, - .iop_unpin = xfs_rui_item_unpin, - .iop_release = xfs_rui_item_release, -}; - /* * Allocate and initialize an rui item with the given number of extents. */ -struct xfs_rui_log_item * +STATIC struct xfs_rui_log_item * xfs_rui_init( struct xfs_mount *mp, uint nextents) @@ -160,7 +157,7 @@ xfs_rui_init( * RUI format structure. The RUI/RUD items were designed not to need any * special alignment handling. */ -int +STATIC int xfs_rui_copy_format( struct xfs_log_iovec *buf, struct xfs_rui_log_format *dst_rui_fmt) @@ -352,41 +349,16 @@ xfs_rmap_update_diff_items( XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock); } -/* Get an RUI. */ -STATIC void * -xfs_rmap_update_create_intent( - struct xfs_trans *tp, - unsigned int count) -{ - struct xfs_rui_log_item *ruip; - - ASSERT(tp != NULL); - ASSERT(count > 0); - - ruip = xfs_rui_init(tp->t_mountp, count); - ASSERT(ruip != NULL); - - /* - * Get a log_item_desc to point at the new item. - */ - xfs_trans_add_item(tp, &ruip->rui_item); - return ruip; -} - /* Log rmap updates in the intent item. 
*/ STATIC void xfs_rmap_update_log_item( struct xfs_trans *tp, - void *intent, - struct list_head *item) + struct xfs_rui_log_item *ruip, + struct xfs_rmap_intent *rmap) { - struct xfs_rui_log_item *ruip = intent; - struct xfs_rmap_intent *rmap; uint next_extent; struct xfs_map_extent *map; - rmap = container_of(item, struct xfs_rmap_intent, ri_list); - tp->t_flags |= XFS_TRANS_DIRTY; set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags); @@ -406,58 +378,64 @@ xfs_rmap_update_log_item( rmap->ri_bmap.br_state); } +static struct xfs_log_item * +xfs_rmap_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count); + struct xfs_rmap_intent *rmap; + + ASSERT(count > 0); + + xfs_trans_add_item(tp, &ruip->rui_item); + if (sort) + list_sort(mp, items, xfs_rmap_update_diff_items); + list_for_each_entry(rmap, items, ri_list) + xfs_rmap_update_log_item(tp, ruip, rmap); + return &ruip->rui_item; +} + /* Get an RUD so we can process all the deferred rmap updates. */ -STATIC void * +static struct xfs_log_item * xfs_rmap_update_create_done( struct xfs_trans *tp, - void *intent, + struct xfs_log_item *intent, unsigned int count) { - return xfs_trans_get_rud(tp, intent); + return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item; } /* Process a deferred rmap update. */ STATIC int xfs_rmap_update_finish_item( struct xfs_trans *tp, + struct xfs_log_item *done, struct list_head *item, - void *done_item, - void **state) + struct xfs_btree_cur **state) { struct xfs_rmap_intent *rmap; int error; rmap = container_of(item, struct xfs_rmap_intent, ri_list); - error = xfs_trans_log_finish_rmap_update(tp, done_item, - rmap->ri_type, - rmap->ri_owner, rmap->ri_whichfork, - rmap->ri_bmap.br_startoff, - rmap->ri_bmap.br_startblock, - rmap->ri_bmap.br_blockcount, - rmap->ri_bmap.br_state, - (struct xfs_btree_cur **)state); + error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done), + rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork, + rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock, + rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state, + state); kmem_free(rmap); return error; } -/* Clean up after processing deferred rmaps. */ -STATIC void -xfs_rmap_update_finish_cleanup( - struct xfs_trans *tp, - void *state, - int error) -{ - struct xfs_btree_cur *rcur = state; - - xfs_rmap_finish_one_cleanup(tp, rcur, error); -} - /* Abort all pending RUIs. */ STATIC void xfs_rmap_update_abort_intent( - void *intent) + struct xfs_log_item *intent) { - xfs_rui_release(intent); + xfs_rui_release(RUI_ITEM(intent)); } /* Cancel a deferred rmap update. */ @@ -473,13 +451,11 @@ xfs_rmap_update_cancel_item( const struct xfs_defer_op_type xfs_rmap_update_defer_type = { .max_items = XFS_RUI_MAX_FAST_EXTENTS, - .diff_items = xfs_rmap_update_diff_items, .create_intent = xfs_rmap_update_create_intent, .abort_intent = xfs_rmap_update_abort_intent, - .log_item = xfs_rmap_update_log_item, .create_done = xfs_rmap_update_create_done, .finish_item = xfs_rmap_update_finish_item, - .finish_cleanup = xfs_rmap_update_finish_cleanup, + .finish_cleanup = xfs_rmap_finish_one_cleanup, .cancel_item = xfs_rmap_update_cancel_item, }; @@ -487,24 +463,24 @@ const struct xfs_defer_op_type xfs_rmap_update_defer_type = { * Process an rmap update intent item that was recovered from the log. * We need to update the rmapbt. 
*/ -int -xfs_rui_recover( - struct xfs_mount *mp, - struct xfs_rui_log_item *ruip) +STATIC int +xfs_rui_item_recover( + struct xfs_log_item *lip, + struct xfs_trans *parent_tp) { - int i; - int error = 0; + struct xfs_rui_log_item *ruip = RUI_ITEM(lip); struct xfs_map_extent *rmap; - xfs_fsblock_t startblock_fsb; - bool op_ok; struct xfs_rud_log_item *rudp; - enum xfs_rmap_intent_type type; - int whichfork; - xfs_exntst_t state; struct xfs_trans *tp; struct xfs_btree_cur *rcur = NULL; - - ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags)); + struct xfs_mount *mp = parent_tp->t_mountp; + xfs_fsblock_t startblock_fsb; + enum xfs_rmap_intent_type type; + xfs_exntst_t state; + bool op_ok; + int i; + int whichfork; + int error = 0; /* * First check the validity of the extents described by the @@ -539,7 +515,6 @@ xfs_rui_recover( * This will pull the RUI from the AIL and * free the memory associated with it. */ - set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags); xfs_rui_release(ruip); return -EFSCORRUPTED; } @@ -597,7 +572,6 @@ xfs_rui_recover( } xfs_rmap_finish_one_cleanup(tp, rcur, error); - set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags); error = xfs_trans_commit(tp); return error; @@ -606,3 +580,90 @@ abort_error: xfs_trans_cancel(tp); return error; } + +STATIC bool +xfs_rui_item_match( + struct xfs_log_item *lip, + uint64_t intent_id) +{ + return RUI_ITEM(lip)->rui_format.rui_id == intent_id; +} + +static const struct xfs_item_ops xfs_rui_item_ops = { + .iop_size = xfs_rui_item_size, + .iop_format = xfs_rui_item_format, + .iop_unpin = xfs_rui_item_unpin, + .iop_release = xfs_rui_item_release, + .iop_recover = xfs_rui_item_recover, + .iop_match = xfs_rui_item_match, +}; + +/* + * This routine is called to create an in-core extent rmap update + * item from the rui format structure which was logged on disk. + * It allocates an in-core rui, copies the extents from the format + * structure into it, and adds the rui to the AIL with the given + * LSN. + */ +STATIC int +xlog_recover_rui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + int error; + struct xfs_mount *mp = log->l_mp; + struct xfs_rui_log_item *ruip; + struct xfs_rui_log_format *rui_formatp; + + rui_formatp = item->ri_buf[0].i_addr; + + ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); + error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format); + if (error) { + xfs_rui_item_free(ruip); + return error; + } + atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); + /* + * Insert the intent into the AIL directly and drop one reference so + * that finishing or canceling the work will drop the other. + */ + xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn); + xfs_rui_release(ruip); + return 0; +} + +const struct xlog_recover_item_ops xlog_rui_item_ops = { + .item_type = XFS_LI_RUI, + .commit_pass2 = xlog_recover_rui_commit_pass2, +}; + +/* + * This routine is called when an RUD format structure is found in a committed + * transaction in the log. Its purpose is to cancel the corresponding RUI if it + * was still in the log. To do this it searches the AIL for the RUI with an id + * equal to that in the RUD format structure. If we find it we drop the RUD + * reference, which removes the RUI from the AIL and frees it. 
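The RUI/RUD (and CUI/CUD) pass-2 handlers follow one pattern: the recovered intent is parked in the AIL holding the list's reference while the creator's reference is dropped immediately, and a later "done" record cancels the matching intent by id, as xlog_recover_rud_commit_pass2() just below does via xlog_recover_release_intent(). A minimal userspace model of that pairing follows; it is illustrative only, not part of this patch, and every name in it (toy_intent, toy_ail, ...) is invented rather than a kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_intent {
	struct toy_intent	*next;
	uint64_t		id;
	int			refcount;
};

static struct toy_intent *toy_ail;

static void toy_intent_put(struct toy_intent *ip)
{
	if (--ip->refcount == 0)
		free(ip);
}

/* Pass 2, intent item: park it on the list, drop the creator's reference. */
static void toy_recover_intent(uint64_t id)
{
	struct toy_intent *ip = calloc(1, sizeof(*ip));

	if (!ip)
		return;
	ip->id = id;
	ip->refcount = 2;	/* one for the list, one for the creator */
	ip->next = toy_ail;
	toy_ail = ip;
	toy_intent_put(ip);	/* creator's reference */
}

/* Pass 2, done item: cancel the matching intent if it is still listed. */
static void toy_recover_done(uint64_t id)
{
	struct toy_intent **pp, *ip;

	for (pp = &toy_ail; (ip = *pp) != NULL; pp = &ip->next) {
		if (ip->id != id)
			continue;
		*pp = ip->next;		/* unlink: the "AIL removal" */
		toy_intent_put(ip);	/* list's reference frees it */
		return;
	}
	/* No match: nothing left to cancel. */
}

int main(void)
{
	toy_recover_intent(42);
	toy_recover_done(42);
	printf("pending intents after recovery: %s\n",
	       toy_ail ? "some" : "none");
	return 0;
}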
+ */ +STATIC int +xlog_recover_rud_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_rud_log_format *rud_formatp; + + rud_formatp = item->ri_buf[0].i_addr; + ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format)); + + xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id); + return 0; +} + +const struct xlog_recover_item_ops xlog_rud_item_ops = { + .item_type = XFS_LI_RUD, + .commit_pass2 = xlog_recover_rud_commit_pass2, +}; diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h index 8708e4a5aa5c..31e6cdfff71f 100644 --- a/fs/xfs/xfs_rmap_item.h +++ b/fs/xfs/xfs_rmap_item.h @@ -36,11 +36,6 @@ struct kmem_zone; #define XFS_RUI_MAX_FAST_EXTENTS 16 /* - * Define RUI flag bits. Manipulated by set/clear/test_bit operators. - */ -#define XFS_RUI_RECOVERED 1 - -/* * This is the "rmap update intent" log item. It is used to log the fact that * some reverse mappings need to change. It is used in conjunction with the * "rmap update done" log item described below. @@ -52,7 +47,6 @@ struct xfs_rui_log_item { struct xfs_log_item rui_item; atomic_t rui_refcount; atomic_t rui_next_extent; - unsigned long rui_flags; /* misc flags */ struct xfs_rui_log_format rui_format; }; @@ -77,11 +71,4 @@ struct xfs_rud_log_item { extern struct kmem_zone *xfs_rui_zone; extern struct kmem_zone *xfs_rud_zone; -struct xfs_rui_log_item *xfs_rui_init(struct xfs_mount *, uint); -int xfs_rui_copy_format(struct xfs_log_iovec *buf, - struct xfs_rui_log_format *dst_rui_fmt); -void xfs_rui_item_free(struct xfs_rui_log_item *); -void xfs_rui_release(struct xfs_rui_log_item *); -int xfs_rui_recover(struct xfs_mount *mp, struct xfs_rui_log_item *ruip); - #endif /* __XFS_RMAP_ITEM_H__ */ diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 424bb9a2d532..379cbff438bc 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -47,6 +47,39 @@ static struct kset *xfs_kset; /* top-level xfs sysfs dir */ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ #endif +enum xfs_dax_mode { + XFS_DAX_INODE = 0, + XFS_DAX_ALWAYS = 1, + XFS_DAX_NEVER = 2, +}; + +static void +xfs_mount_set_dax_mode( + struct xfs_mount *mp, + enum xfs_dax_mode mode) +{ + switch (mode) { + case XFS_DAX_INODE: + mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER); + break; + case XFS_DAX_ALWAYS: + mp->m_flags |= XFS_MOUNT_DAX_ALWAYS; + mp->m_flags &= ~XFS_MOUNT_DAX_NEVER; + break; + case XFS_DAX_NEVER: + mp->m_flags |= XFS_MOUNT_DAX_NEVER; + mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS; + break; + } +} + +static const struct constant_table dax_param_enums[] = { + {"inode", XFS_DAX_INODE }, + {"always", XFS_DAX_ALWAYS }, + {"never", XFS_DAX_NEVER }, + {} +}; + /* * Table driven mount option parser. 
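As the dax_param_enums table above suggests, the mount option now has three spellings plus the bare flag, handled by the fsparam_enum() entry added just below. Here is a small standalone C sketch of the resulting semantics; it models only the option strings, does not use the kernel's fs_parameter API, and all names in it are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Standalone model of the three DAX states the patch introduces. */
enum dax_mode { DAX_INODE, DAX_ALWAYS, DAX_NEVER, DAX_BAD };

static enum dax_mode parse_dax_option(const char *opt)
{
	/* A bare "dax" keeps its historical meaning of "dax=always". */
	if (strcmp(opt, "dax") == 0 || strcmp(opt, "dax=always") == 0)
		return DAX_ALWAYS;
	if (strcmp(opt, "dax=never") == 0)
		return DAX_NEVER;
	if (strcmp(opt, "dax=inode") == 0)
		return DAX_INODE;
	return DAX_BAD;
}

int main(void)
{
	const char *samples[] = { "dax", "dax=always", "dax=never", "dax=inode" };
	static const char *names[] = { "inode", "always", "never", "bad" };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-12s -> %s\n", samples[i],
		       names[parse_dax_option(samples[i])]);
	return 0;
}

The detail worth noting is that a bare "dax" is treated as "dax=always", while "dax=inode" restores the default per-inode behaviour.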
*/ @@ -59,7 +92,7 @@ enum { Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, - Opt_discard, Opt_nodiscard, Opt_dax, + Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, }; static const struct fs_parameter_spec xfs_fs_parameters[] = { @@ -103,6 +136,7 @@ static const struct fs_parameter_spec xfs_fs_parameters[] = { fsparam_flag("discard", Opt_discard), fsparam_flag("nodiscard", Opt_nodiscard), fsparam_flag("dax", Opt_dax), + fsparam_enum("dax", Opt_dax_enum, dax_param_enums), {} }; @@ -129,7 +163,8 @@ xfs_fs_show_options( { XFS_MOUNT_GRPID, ",grpid" }, { XFS_MOUNT_DISCARD, ",discard" }, { XFS_MOUNT_LARGEIO, ",largeio" }, - { XFS_MOUNT_DAX, ",dax" }, + { XFS_MOUNT_DAX_ALWAYS, ",dax=always" }, + { XFS_MOUNT_DAX_NEVER, ",dax=never" }, { 0, NULL } }; struct xfs_mount *mp = XFS_M(root->d_sb); @@ -305,7 +340,7 @@ void xfs_blkdev_issue_flush( xfs_buftarg_t *buftarg) { - blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL); + blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS); } STATIC void @@ -702,7 +737,7 @@ xfs_fs_drop_inode( return 0; } - return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE); + return generic_drop_inode(inode); } static void @@ -772,7 +807,8 @@ xfs_fs_statfs( statp->f_blocks = sbp->sb_dblocks - lsize; spin_unlock(&mp->m_sb_lock); - statp->f_bfree = fdblocks - mp->m_alloc_set_aside; + /* make sure statp->f_bfree does not underflow */ + statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0); statp->f_bavail = statp->f_bfree; fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree); @@ -838,8 +874,10 @@ xfs_restore_resvblks(struct xfs_mount *mp) * there is no log replay required to write the inodes to disk - this is the * primary difference between a sync and a quiesce. * - * Note: xfs_log_quiesce() stops background log work - the callers must ensure - * it is started again when appropriate. + * We cancel log work early here to ensure all transactions the log worker may + * run have finished before we clean up and log the superblock and write an + * unmount record. The unfreeze process is responsible for restarting the log + * worker correctly. */ void xfs_quiesce_attr( @@ -847,9 +885,7 @@ xfs_quiesce_attr( { int error = 0; - /* wait for all modifications to complete */ - while (atomic_read(&mp->m_active_trans) > 0) - delay(100); + cancel_delayed_work_sync(&mp->m_log->l_work); /* force the log to unpin objects from the now complete transactions */ xfs_log_force(mp, XFS_LOG_SYNC); @@ -863,12 +899,6 @@ xfs_quiesce_attr( if (error) xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. " "Frozen image may not be consistent."); - /* - * Just warn here till VFS can correctly support - * read-only remount without racing. 
- */ - WARN_ON(atomic_read(&mp->m_active_trans) != 0); - xfs_log_quiesce(mp); } @@ -1261,7 +1291,10 @@ xfs_fc_parse_param( return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - mp->m_flags |= XFS_MOUNT_DAX; + xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS); + return 0; + case Opt_dax_enum: + xfs_mount_set_dax_mode(mp, result.uint_32); return 0; #endif default: @@ -1454,7 +1487,7 @@ xfs_fc_fill_super( if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) sb->s_flags |= SB_I_VERSION; - if (mp->m_flags & XFS_MOUNT_DAX) { + if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) { bool rtdev_is_dax = false, datadev_is_dax; xfs_warn(mp, @@ -1468,7 +1501,7 @@ xfs_fc_fill_super( if (!rtdev_is_dax && !datadev_is_dax) { xfs_alert(mp, "DAX unsupported by block device. Turning off DAX."); - mp->m_flags &= ~XFS_MOUNT_DAX; + xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER); } if (xfs_sb_version_hasreflink(&mp->m_sb)) { xfs_alert(mp, @@ -1754,7 +1787,6 @@ static int xfs_init_fs_context( INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); spin_lock_init(&mp->m_perag_lock); mutex_init(&mp->m_growlock); - atomic_set(&mp->m_active_trans, 0); INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker); diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index 13fb4b919648..8e88a7ca387e 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c @@ -243,8 +243,7 @@ xfs_symlink( */ xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); - if (resblks) - resblks -= XFS_IALLOC_SPACE_RES(mp); + resblks -= XFS_IALLOC_SPACE_RES(mp); /* * If the symlink will fit into the inode, write it inline. */ @@ -252,7 +251,7 @@ xfs_symlink( xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen); ip->i_d.di_size = pathlen; - ip->i_d.di_format = XFS_DINODE_FMT_LOCAL; + ip->i_df.if_format = XFS_DINODE_FMT_LOCAL; xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE); } else { int offset; @@ -265,8 +264,7 @@ xfs_symlink( if (error) goto out_trans_cancel; - if (resblks) - resblks -= fs_blocks; + resblks -= fs_blocks; ip->i_d.di_size = pathlen; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); @@ -386,7 +384,7 @@ xfs_inactive_symlink_rmt( * either 1 or 2 extents and that we can * free them all in one bunmapi call. 
*/ - ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2); + ASSERT(ip->i_df.if_nextents > 0 && ip->i_df.if_nextents <= 2); error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); if (error) diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index 31b3bdbd2eba..021ef96d0542 100644 --- a/fs/xfs/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c @@ -13,7 +13,7 @@ STATIC int xfs_stats_clear_proc_handler( struct ctl_table *ctl, int write, - void __user *buffer, + void *buffer, size_t *lenp, loff_t *ppos) { @@ -33,7 +33,7 @@ STATIC int xfs_panic_mask_proc_handler( struct ctl_table *ctl, int write, - void __user *buffer, + void *buffer, size_t *lenp, loff_t *ppos) { diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index a4323a63438d..460136628a79 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -1897,8 +1897,8 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class, __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->which = which; __entry->ino = ip->i_ino; - __entry->format = ip->i_d.di_format; - __entry->nex = ip->i_d.di_nextents; + __entry->format = ip->i_df.if_format; + __entry->nex = ip->i_df.if_nextents; __entry->broot_size = ip->i_df.if_broot_bytes; __entry->fork_off = XFS_IFORK_BOFF(ip); ), diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 28b983ff8b11..3c94e5ff4316 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -68,7 +68,6 @@ xfs_trans_free( xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false); trace_xfs_trans_free(tp, _RET_IP_); - atomic_dec(&tp->t_mountp->m_active_trans); if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT)) sb_end_intwrite(tp->t_mountp->m_super); xfs_trans_free_dqinfo(tp); @@ -125,8 +124,6 @@ xfs_trans_dup( xfs_defer_move(ntp, tp); xfs_trans_dup_dqinfo(tp, ntp); - - atomic_inc(&tp->t_mountp->m_active_trans); return ntp; } @@ -275,7 +272,6 @@ xfs_trans_alloc( */ WARN_ON(resp->tr_logres > 0 && mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); - atomic_inc(&mp->m_active_trans); tp->t_magic = XFS_TRANS_HEADER_MAGIC; tp->t_flags = flags; @@ -299,20 +295,19 @@ xfs_trans_alloc( /* * Create an empty transaction with no reservation. This is a defensive - * mechanism for routines that query metadata without actually modifying - * them -- if the metadata being queried is somehow cross-linked (think a - * btree block pointer that points higher in the tree), we risk deadlock. - * However, blocks grabbed as part of a transaction can be re-grabbed. - * The verifiers will notice the corrupt block and the operation will fail - * back to userspace without deadlocking. + * mechanism for routines that query metadata without actually modifying them -- + * if the metadata being queried is somehow cross-linked (think a btree block + * pointer that points higher in the tree), we risk deadlock. However, blocks + * grabbed as part of a transaction can be re-grabbed. The verifiers will + * notice the corrupt block and the operation will fail back to userspace + * without deadlocking. * - * Note the zero-length reservation; this transaction MUST be cancelled - * without any dirty data. + * Note the zero-length reservation; this transaction MUST be cancelled without + * any dirty data. * - * Callers should obtain freeze protection to avoid two conflicts with fs - * freezing: (1) having active transactions trip the m_active_trans ASSERTs; - * and (2) grabbing buffers at the same time that freeze is trying to drain - * the buffer LRU list. 
+ * Callers should obtain freeze protection to avoid a conflict with fs freezing + * where we can be grabbing buffers at the same time that freeze is trying to + * drain the buffer LRU list. */ int xfs_trans_alloc_empty( @@ -534,57 +529,9 @@ xfs_trans_apply_sb_deltas( sizeof(sbp->sb_frextents) - 1); } -STATIC int -xfs_sb_mod8( - uint8_t *field, - int8_t delta) -{ - int8_t counter = *field; - - counter += delta; - if (counter < 0) { - ASSERT(0); - return -EINVAL; - } - *field = counter; - return 0; -} - -STATIC int -xfs_sb_mod32( - uint32_t *field, - int32_t delta) -{ - int32_t counter = *field; - - counter += delta; - if (counter < 0) { - ASSERT(0); - return -EINVAL; - } - *field = counter; - return 0; -} - -STATIC int -xfs_sb_mod64( - uint64_t *field, - int64_t delta) -{ - int64_t counter = *field; - - counter += delta; - if (counter < 0) { - ASSERT(0); - return -EINVAL; - } - *field = counter; - return 0; -} - /* - * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations - * and apply superblock counter changes to the in-core superblock. The + * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and + * apply superblock counter changes to the in-core superblock. The * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT * applied to the in-core superblock. The idea is that that has already been * done. @@ -593,7 +540,12 @@ xfs_sb_mod64( * used block counts are not updated in the on disk superblock. In this case, * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we * still need to update the incore superblock with the changes. + * + * Deltas for the inode count are +/-64, hence we use a large batch size of 128 + * so we don't need to take the counter lock on every update. 
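The comment above explains the batch size choice: inode-count deltas arrive in units of 64, so the 128-entry batch defined just below lets most updates stay in the per-CPU part of the counter without taking the lock. A toy, single-threaded model of that batching idea follows; it is not the kernel's percpu_counter implementation and all names are made up.

#include <stdio.h>
#include <stdlib.h>

struct toy_counter {
	long	shared;		/* protected by a lock in the real thing */
	long	local;		/* one slot per CPU in the real thing */
	long	batch;
};

static void toy_counter_add(struct toy_counter *c, long delta)
{
	c->local += delta;
	if (labs(c->local) >= c->batch) {
		/* only here would the shared lock be taken */
		c->shared += c->local;
		c->local = 0;
	}
}

static long toy_counter_sum(const struct toy_counter *c)
{
	return c->shared + c->local;
}

int main(void)
{
	struct toy_counter icount = { .batch = 128 };

	/* inode allocations arrive in chunks of 64, frees likewise */
	toy_counter_add(&icount, 64);
	toy_counter_add(&icount, 64);
	toy_counter_add(&icount, -64);
	printf("icount = %ld\n", toy_counter_sum(&icount));
	return 0;
}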
*/ +#define XFS_ICOUNT_BATCH 128 + void xfs_trans_unreserve_and_mod_sb( struct xfs_trans *tp) @@ -629,20 +581,21 @@ xfs_trans_unreserve_and_mod_sb( /* apply the per-cpu counters */ if (blkdelta) { error = xfs_mod_fdblocks(mp, blkdelta, rsvd); - if (error) - goto out; + ASSERT(!error); } if (idelta) { - error = xfs_mod_icount(mp, idelta); - if (error) - goto out_undo_fdblocks; + percpu_counter_add_batch(&mp->m_icount, idelta, + XFS_ICOUNT_BATCH); + if (idelta < 0) + ASSERT(__percpu_counter_compare(&mp->m_icount, 0, + XFS_ICOUNT_BATCH) >= 0); } if (ifreedelta) { - error = xfs_mod_ifree(mp, ifreedelta); - if (error) - goto out_undo_icount; + percpu_counter_add(&mp->m_ifree, ifreedelta); + if (ifreedelta < 0) + ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0); } if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY)) @@ -650,95 +603,23 @@ xfs_trans_unreserve_and_mod_sb( /* apply remaining deltas */ spin_lock(&mp->m_sb_lock); - if (rtxdelta) { - error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta); - if (error) - goto out_undo_ifree; - } - - if (tp->t_dblocks_delta != 0) { - error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta); - if (error) - goto out_undo_frextents; - } - if (tp->t_agcount_delta != 0) { - error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta); - if (error) - goto out_undo_dblocks; - } - if (tp->t_imaxpct_delta != 0) { - error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta); - if (error) - goto out_undo_agcount; - } - if (tp->t_rextsize_delta != 0) { - error = xfs_sb_mod32(&mp->m_sb.sb_rextsize, - tp->t_rextsize_delta); - if (error) - goto out_undo_imaxpct; - } - if (tp->t_rbmblocks_delta != 0) { - error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, - tp->t_rbmblocks_delta); - if (error) - goto out_undo_rextsize; - } - if (tp->t_rblocks_delta != 0) { - error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta); - if (error) - goto out_undo_rbmblocks; - } - if (tp->t_rextents_delta != 0) { - error = xfs_sb_mod64(&mp->m_sb.sb_rextents, - tp->t_rextents_delta); - if (error) - goto out_undo_rblocks; - } - if (tp->t_rextslog_delta != 0) { - error = xfs_sb_mod8(&mp->m_sb.sb_rextslog, - tp->t_rextslog_delta); - if (error) - goto out_undo_rextents; - } + mp->m_sb.sb_frextents += rtxdelta; + mp->m_sb.sb_dblocks += tp->t_dblocks_delta; + mp->m_sb.sb_agcount += tp->t_agcount_delta; + mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta; + mp->m_sb.sb_rextsize += tp->t_rextsize_delta; + mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta; + mp->m_sb.sb_rblocks += tp->t_rblocks_delta; + mp->m_sb.sb_rextents += tp->t_rextents_delta; + mp->m_sb.sb_rextslog += tp->t_rextslog_delta; spin_unlock(&mp->m_sb_lock); - return; -out_undo_rextents: - if (tp->t_rextents_delta) - xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta); -out_undo_rblocks: - if (tp->t_rblocks_delta) - xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta); -out_undo_rbmblocks: - if (tp->t_rbmblocks_delta) - xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta); -out_undo_rextsize: - if (tp->t_rextsize_delta) - xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta); -out_undo_imaxpct: - if (tp->t_rextsize_delta) - xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta); -out_undo_agcount: - if (tp->t_agcount_delta) - xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta); -out_undo_dblocks: - if (tp->t_dblocks_delta) - xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta); -out_undo_frextents: - if (rtxdelta) - xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta); -out_undo_ifree: - 
spin_unlock(&mp->m_sb_lock); - if (ifreedelta) - xfs_mod_ifree(mp, -ifreedelta); -out_undo_icount: - if (idelta) - xfs_mod_icount(mp, -idelta); -out_undo_fdblocks: - if (blkdelta) - xfs_mod_fdblocks(mp, -blkdelta, rsvd); -out: - ASSERT(error == 0); + /* + * Debug checks outside of the spinlock so they don't lock up the + * machine if they fail. + */ + ASSERT(mp->m_sb.sb_imax_pct >= 0); + ASSERT(mp->m_sb.sb_rextslog >= 0); return; } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 752c7fef9de7..8308bf6d7e40 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -59,12 +59,14 @@ struct xfs_log_item { #define XFS_LI_ABORTED 1 #define XFS_LI_FAILED 2 #define XFS_LI_DIRTY 3 /* log item dirty in transaction */ +#define XFS_LI_RECOVERED 4 /* log intent item has been recovered */ #define XFS_LI_FLAGS \ { (1 << XFS_LI_IN_AIL), "IN_AIL" }, \ { (1 << XFS_LI_ABORTED), "ABORTED" }, \ { (1 << XFS_LI_FAILED), "FAILED" }, \ - { (1 << XFS_LI_DIRTY), "DIRTY" } + { (1 << XFS_LI_DIRTY), "DIRTY" }, \ + { (1 << XFS_LI_RECOVERED), "RECOVERED" } struct xfs_item_ops { unsigned flags; @@ -77,6 +79,8 @@ struct xfs_item_ops { void (*iop_release)(struct xfs_log_item *); xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t); void (*iop_error)(struct xfs_log_item *, xfs_buf_t *); + int (*iop_recover)(struct xfs_log_item *lip, struct xfs_trans *tp); + bool (*iop_match)(struct xfs_log_item *item, uint64_t id); }; /* diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 564253550b75..ac5019361a13 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -345,6 +345,45 @@ xfs_ail_delete( xfs_trans_ail_cursor_clear(ailp, lip); } +/* + * Requeue a failed buffer for writeback. + * + * We clear the log item failed state here as well, but we have to be careful + * about reference counts because the only active reference counts on the buffer + * may be the failed log items. Hence if we clear the log item failed state + * before queuing the buffer for IO we can release all active references to + * the buffer and free it, leading to use after free problems in + * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which + * order we process them in - the buffer is locked, and we own the buffer list + * so nothing on them is going to change while we are performing this action. + * + * Hence we can safely queue the buffer for IO before we clear the failed log + * item state, therefore always having an active reference to the buffer and + * avoiding the transient zero-reference state that leads to use-after-free. + */ +static inline int +xfsaild_resubmit_item( + struct xfs_log_item *lip, + struct list_head *buffer_list) +{ + struct xfs_buf *bp = lip->li_buf; + + if (!xfs_buf_trylock(bp)) + return XFS_ITEM_LOCKED; + + if (!xfs_buf_delwri_queue(bp, buffer_list)) { + xfs_buf_unlock(bp); + return XFS_ITEM_FLUSHING; + } + + /* protected by ail_lock */ + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) + xfs_clear_li_failed(lip); + + xfs_buf_unlock(bp); + return XFS_ITEM_SUCCESS; +} + static inline uint xfsaild_push_item( struct xfs_ail *ailp, @@ -365,6 +404,8 @@ xfsaild_push_item( */ if (!lip->li_ops->iop_push) return XFS_ITEM_PINNED; + if (test_bit(XFS_LI_FAILED, &lip->li_flags)) + return xfsaild_resubmit_item(lip, &ailp->ail_buf_list); return lip->li_ops->iop_push(lip, &ailp->ail_buf_list); } @@ -774,6 +815,17 @@ xfs_trans_ail_update_bulk( xfs_ail_update_finish(ailp, tail_lsn); } +/* Insert a log item into the AIL. 
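The ordering that the xfsaild_resubmit_item() comment above describes can be modelled with a plain reference count. The sketch below is purely illustrative (toy_buf and friends are invented names, not kernel code); it shows why taking the delwri queue's reference before dropping the failed items' references avoids the transient zero-reference window.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy buffer: freed when its last reference goes away. */
struct toy_buf {
	int	refs;
	bool	queued;
};

static void toy_buf_put(struct toy_buf *bp)
{
	if (--bp->refs == 0) {
		printf("buffer freed\n");
		free(bp);
	}
}

/*
 * Safe resubmit order: the queue takes its reference first, so dropping
 * the references held by the failed log items can never free the buffer
 * out from under the queue.
 */
static void toy_resubmit(struct toy_buf *bp, int nr_failed_items)
{
	bp->refs++;			/* reference held by the delwri queue */
	bp->queued = true;

	while (nr_failed_items--)	/* clear the failed items' references */
		toy_buf_put(bp);
}

int main(void)
{
	struct toy_buf *bp = calloc(1, sizeof(*bp));

	if (!bp)
		return 1;
	bp->refs = 2;			/* two failed log items, nothing else */
	toy_resubmit(bp, 2);
	printf("queued: %d, refs left: %d\n", bp->queued, bp->refs);
	toy_buf_put(bp);		/* the queue's reference, dropped after I/O */
	return 0;
}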
*/ +void +xfs_trans_ail_insert( + struct xfs_ail *ailp, + struct xfs_log_item *lip, + xfs_lsn_t lsn) +{ + spin_lock(&ailp->ail_lock); + xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn); +} + /* * Delete one log item from the AIL. * @@ -800,39 +852,19 @@ xfs_ail_delete_one( return 0; } -/** - * Remove a log items from the AIL - * - * @xfs_trans_ail_delete_bulk takes an array of log items that all need to - * removed from the AIL. The caller is already holding the AIL lock, and done - * all the checks necessary to ensure the items passed in via @log_items are - * ready for deletion. This includes checking that the items are in the AIL. - * - * For each log item to be removed, unlink it from the AIL, clear the IN_AIL - * flag from the item and reset the item's lsn to 0. If we remove the first - * item in the AIL, update the log tail to match the new minimum LSN in the - * AIL. - * - * This function will not drop the AIL lock until all items are removed from - * the AIL to minimise the amount of lock traffic on the AIL. This does not - * greatly increase the AIL hold time, but does significantly reduce the amount - * of traffic on the lock, especially during IO completion. - * - * This function must be called with the AIL lock held. The lock is dropped - * before returning. - */ void xfs_trans_ail_delete( - struct xfs_ail *ailp, struct xfs_log_item *lip, int shutdown_type) { + struct xfs_ail *ailp = lip->li_ailp; struct xfs_mount *mp = ailp->ail_mount; xfs_lsn_t tail_lsn; + spin_lock(&ailp->ail_lock); if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) { spin_unlock(&ailp->ail_lock); - if (!XFS_FORCED_SHUTDOWN(mp)) { + if (shutdown_type && !XFS_FORCED_SHUTDOWN(mp)) { xfs_alert_tag(mp, XFS_PTAG_AILDELETE, "%s: attempting to delete a log item that is not in the AIL", __func__); @@ -841,6 +873,7 @@ xfs_trans_ail_delete( return; } + /* xfs_ail_update_finish() drops the AIL lock */ tail_lsn = xfs_ail_delete_one(ailp, lip); xfs_ail_update_finish(ailp, tail_lsn); } diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index d1b9869bc5fa..c0f73b82c055 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -388,7 +388,7 @@ xfs_trans_apply_dquot_deltas( */ if (d->d_id) { xfs_qm_adjust_dqlimits(tp->t_mountp, dqp); - xfs_qm_adjust_dqtimers(tp->t_mountp, d); + xfs_qm_adjust_dqtimers(tp->t_mountp, dqp); } dqp->dq_flags |= XFS_DQ_DIRTY; @@ -591,7 +591,7 @@ xfs_trans_dqresv( xfs_dqlock(dqp); - defq = xfs_get_defquota(dqp, q); + defq = xfs_get_defquota(q, xfs_dquot_type(dqp)); if (flags & XFS_TRANS_DQ_RES_BLKS) { hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); @@ -602,7 +602,7 @@ xfs_trans_dqresv( softlimit = defq->bsoftlimit; timer = be32_to_cpu(dqp->q_core.d_btimer); warns = be16_to_cpu(dqp->q_core.d_bwarns); - warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit; + warnlimit = defq->bwarnlimit; resbcountp = &dqp->q_res_bcount; } else { ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); @@ -614,7 +614,7 @@ xfs_trans_dqresv( softlimit = defq->rtbsoftlimit; timer = be32_to_cpu(dqp->q_core.d_rtbtimer); warns = be16_to_cpu(dqp->q_core.d_rtbwarns); - warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit; + warnlimit = defq->rtbwarnlimit; resbcountp = &dqp->q_res_rtbcount; } @@ -650,7 +650,7 @@ xfs_trans_dqresv( total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); - warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; + warnlimit = defq->iwarnlimit; hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); 
if (!hardlimit) hardlimit = defq->ihardlimit; @@ -711,7 +711,7 @@ xfs_trans_dqresv( error_return: xfs_dqunlock(dqp); - if (flags & XFS_QMOPT_ENOSPC) + if (XFS_QM_ISPDQ(dqp)) return -ENOSPC; return -EDQUOT; } @@ -751,8 +751,7 @@ xfs_trans_reserve_quota_bydquots( ASSERT(flags & XFS_QMOPT_RESBLK_MASK); if (udqp) { - error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, - (flags & ~XFS_QMOPT_ENOSPC)); + error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags); if (error) return error; } @@ -803,16 +802,12 @@ xfs_trans_reserve_quota_nblks( if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; - if (XFS_IS_PQUOTA_ON(mp)) - flags |= XFS_QMOPT_ENOSPC; ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == - XFS_TRANS_DQ_RES_RTBLKS || - (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == - XFS_TRANS_DQ_RES_BLKS); + ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS || + (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS); /* * Reserve nblks against these dquots, with trans as the mediator. diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 35655eac01a6..3004aeac9110 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -91,26 +91,13 @@ xfs_trans_ail_update( xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn); } +void xfs_trans_ail_insert(struct xfs_ail *ailp, struct xfs_log_item *lip, + xfs_lsn_t lsn); + xfs_lsn_t xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip); void xfs_ail_update_finish(struct xfs_ail *ailp, xfs_lsn_t old_lsn) __releases(ailp->ail_lock); -void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip, - int shutdown_type); - -static inline void -xfs_trans_ail_remove( - struct xfs_log_item *lip, - int shutdown_type) -{ - struct xfs_ail *ailp = lip->li_ailp; - - spin_lock(&ailp->ail_lock); - /* xfs_trans_ail_delete() drops the AIL lock */ - if (test_bit(XFS_LI_IN_AIL, &lip->li_flags)) - xfs_trans_ail_delete(ailp, lip, shutdown_type); - else - spin_unlock(&ailp->ail_lock); -} +void xfs_trans_ail_delete(struct xfs_log_item *lip, int shutdown_type); void xfs_ail_push(struct xfs_ail *, xfs_lsn_t); void xfs_ail_push_all(struct xfs_ail *); diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c index fc5d7276026e..bca48b308c02 100644 --- a/fs/xfs/xfs_xattr.c +++ b/fs/xfs/xfs_xattr.c @@ -12,7 +12,6 @@ #include "xfs_inode.h" #include "xfs_attr.h" #include "xfs_acl.h" -#include "xfs_da_format.h" #include "xfs_da_btree.h" #include <linux/posix_acl_xattr.h> diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index 3ce9829a6936..07bc42d62673 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -20,6 +20,7 @@ #include <linux/mman.h> #include <linux/sched/mm.h> #include <linux/crc32.h> +#include <linux/task_io_accounting_ops.h> #include "zonefs.h" @@ -78,10 +79,9 @@ static int zonefs_readpage(struct file *unused, struct page *page) return iomap_readpage(page, &zonefs_iomap_ops); } -static int zonefs_readpages(struct file *unused, struct address_space *mapping, - struct list_head *pages, unsigned int nr_pages) +static void zonefs_readahead(struct readahead_control *rac) { - return iomap_readpages(mapping, pages, nr_pages, &zonefs_iomap_ops); + iomap_readahead(rac, &zonefs_iomap_ops); } /* @@ -128,7 +128,7 @@ static int zonefs_writepages(struct address_space *mapping, static const struct address_space_operations zonefs_file_aops = { .readpage = zonefs_readpage, - .readpages = zonefs_readpages, + 
.readahead = zonefs_readahead, .writepage = zonefs_writepage, .writepages = zonefs_writepages, .set_page_dirty = iomap_set_page_dirty, @@ -478,7 +478,7 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end, if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV) ret = file_write_and_wait_range(file, start, end); if (!ret) - ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL); if (ret) zonefs_io_error(inode, true); @@ -596,6 +596,61 @@ static const struct iomap_dio_ops zonefs_write_dio_ops = { .end_io = zonefs_file_write_dio_end_io, }; +static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) +{ + struct inode *inode = file_inode(iocb->ki_filp); + struct zonefs_inode_info *zi = ZONEFS_I(inode); + struct block_device *bdev = inode->i_sb->s_bdev; + unsigned int max; + struct bio *bio; + ssize_t size; + int nr_pages; + ssize_t ret; + + nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); + if (!nr_pages) + return 0; + + max = queue_max_zone_append_sectors(bdev_get_queue(bdev)); + max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize); + iov_iter_truncate(from, max); + + bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set); + if (!bio) + return -ENOMEM; + + bio_set_dev(bio, bdev); + bio->bi_iter.bi_sector = zi->i_zsector; + bio->bi_write_hint = iocb->ki_hint; + bio->bi_ioprio = iocb->ki_ioprio; + bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; + if (iocb->ki_flags & IOCB_DSYNC) + bio->bi_opf |= REQ_FUA; + + ret = bio_iov_iter_get_pages(bio, from); + if (unlikely(ret)) { + bio_io_error(bio); + return ret; + } + size = bio->bi_iter.bi_size; + task_io_account_write(ret); + + if (iocb->ki_flags & IOCB_HIPRI) + bio_set_polled(bio, iocb); + + ret = submit_bio_wait(bio); + + bio_put(bio); + + zonefs_file_write_dio_end_io(iocb, size, ret, 0); + if (ret >= 0) { + iocb->ki_pos += size; + return size; + } + + return ret; +} + /* * Handle direct writes. For sequential zone files, this is the only possible * write path. For these files, check that the user is issuing writes @@ -611,6 +666,8 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(iocb->ki_filp); struct zonefs_inode_info *zi = ZONEFS_I(inode); struct super_block *sb = inode->i_sb; + bool sync = is_sync_kiocb(iocb); + bool append = false; size_t count; ssize_t ret; @@ -619,7 +676,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) * as this can cause write reordering (e.g. the first aio gets EAGAIN * on the inode lock but the second goes through but is now unaligned). 
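For context on the append-only rule enforced just below: a zonefs sequential zone file only accepts writes that start at the zone write pointer, which zonefs reports as the file size. A small userspace example of issuing such a write follows; it is an illustration only, assumes a 4 KiB block size and a sequential-zone file path supplied by the caller, and keeps error handling minimal.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	void *buf;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <zonefs sequential file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_WRONLY | O_DIRECT);
	if (fd < 0 || fstat(fd, &st) < 0) {
		perror(argv[1]);
		return 1;
	}

	/*
	 * The write pointer is exposed as the file size; a write anywhere
	 * else in a sequential file fails with EINVAL.
	 */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	if (pwrite(fd, buf, 4096, st.st_size) < 0)
		perror("pwrite");

	free(buf);
	close(fd);
	return 0;
}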
*/ - if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) && + if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync && (iocb->ki_flags & IOCB_NOWAIT)) return -EOPNOTSUPP; @@ -643,16 +700,22 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) } /* Enforce sequential writes (append only) in sequential zones */ - mutex_lock(&zi->i_truncate_mutex); - if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) { + if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) { + mutex_lock(&zi->i_truncate_mutex); + if (iocb->ki_pos != zi->i_wpoffset) { + mutex_unlock(&zi->i_truncate_mutex); + ret = -EINVAL; + goto inode_unlock; + } mutex_unlock(&zi->i_truncate_mutex); - ret = -EINVAL; - goto inode_unlock; + append = sync; } - mutex_unlock(&zi->i_truncate_mutex); - ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops, - &zonefs_write_dio_ops, is_sync_kiocb(iocb)); + if (append) + ret = zonefs_file_dio_append(iocb, from); + else + ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops, + &zonefs_write_dio_ops, sync); if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && (ret > 0 || ret == -EIOCBQUEUED)) { if (ret > 0) @@ -1267,7 +1330,7 @@ static int zonefs_read_super(struct super_block *sb) goto unmap; } - uuid_copy(&sbi->s_uuid, (uuid_t *)super->s_uuid); + import_uuid(&sbi->s_uuid, super->s_uuid); ret = 0; unmap: |