author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-13 17:45:40 -0600
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-13 17:45:40 -0600
commit     cec997093bbff881c3da49084dfba4f76361e96a (patch)
tree       7c84f8c30ceef7209a18d7cd216a3c16536008c5 /fs
parent     8d2d441ac4af223eae466c3c31ff737cc31a1411 (diff)
parent     01777836c87081e4f68c4a43c9abe6114805f91e (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull quota, reiserfs, UDF updates from Jan Kara:
"Scalability improvements for quota, a few reiserfs fixes, and couple
of misc cleanups (udf, ext2)"
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
reiserfs: Fix use after free in journal teardown
reiserfs: fix corruption introduced by balance_leaf refactor
udf: avoid redundant memcpy when writing data in ICB
fs/udf: re-use hex_asc_upper_{hi,lo} macros
fs/quota: kernel-doc warning fixes
udf: use linux/uaccess.h
fs/ext2/super.c: Drop memory allocation cast
quota: remove dqptr_sem
quota: simplify remove_inode_dquot_ref()
quota: avoid unnecessary dqget()/dqput() calls
quota: protect Q_GETFMT by dqonoff_mutex
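The core of the quota scalability work below is replacing the global
rw-semaphore dqptr_sem with SRCU for readers of the inode->i_dquot[]
pointers, plus dq_data_lock for the writers that set and clear them.
Condensed into a standalone sketch (the function bodies here are
illustrative only; the real read and teardown sides live in
__dquot_alloc_space() and drop_dquot_ref() in the dquot.c diff below):

	#include <linux/fs.h>
	#include <linux/quota.h>
	#include <linux/spinlock.h>
	#include <linux/srcu.h>

	/* Readers of inode->i_dquot[] are protected by SRCU, not a rwsem. */
	DEFINE_STATIC_SRCU(dquot_srcu);

	/* Read side: cheap, scalable, never blocks the teardown side. */
	static void quota_read_side(struct inode *inode)
	{
		int index;

		index = srcu_read_lock(&dquot_srcu);
		/*
		 * inode->i_dquot[cnt] may be dereferenced safely here; the
		 * dquots cannot be freed until every reader has left this
		 * section.
		 */
		srcu_read_unlock(&dquot_srcu, index);
	}

	/* Teardown side: clear the pointer under dq_data_lock, wait out
	 * all SRCU readers, and only then drop the (possibly last)
	 * reference. */
	static void quota_teardown_side(struct inode *inode, int type)
	{
		spin_lock(&dq_data_lock);
		inode->i_dquot[type] = NULL;
		spin_unlock(&dq_data_lock);

		synchronize_srcu(&dquot_srcu);
		/* ... dqput() the dquot; no reader can still see it ... */
	}

This trades a write-side cost (synchronize_srcu() on the rare quota-off
and teardown paths) for lock-free read-side access on every allocation
and free, which is where the scalability win comes from.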
Diffstat (limited to 'fs')
 fs/ext2/super.c        |   2
 fs/quota/dquot.c       | 180
 fs/quota/kqid.c        |   2
 fs/quota/netlink.c     |   3
 fs/quota/quota.c       |   6
 fs/reiserfs/do_balan.c | 111
 fs/reiserfs/journal.c  |  22
 fs/reiserfs/lbalance.c |   5
 fs/reiserfs/reiserfs.h |   9
 fs/reiserfs/super.c    |   6
 fs/super.c             |   1
 fs/udf/file.c          |  22
 fs/udf/lowlevel.c      |   2
 fs/udf/super.c         |   2
 fs/udf/symlink.c       |   2
 fs/udf/unicode.c       |   9
 16 files changed, 189 insertions(+), 195 deletions(-)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3750031cfa2f..b88edc05c230 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -161,7 +161,7 @@ static struct kmem_cache * ext2_inode_cachep;
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
 	struct ext2_inode_info *ei;
-	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->i_block_alloc_info = NULL;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 7f30bdc57d13..f2d0eee9d1f1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -96,13 +96,16 @@
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
  *
- * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Operation accessing dquots via inode pointers are protected by dquot_srcu.
+ * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
+ * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
+ * inode and before dropping dquot references to avoid use of dquots after
+ * they are freed. dq_data_lock is used to serialize the pointer setting and
+ * clearing operations.
  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  * inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping dq_data_lock. This makes
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
@@ -116,21 +119,15 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
- *   dqio_mutex
+ *   dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
  * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
  */

 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
+DEFINE_STATIC_SRCU(dquot_srcu);

 void __quota_error(struct super_block *sb, const char *func,
 		   const char *fmt, ...)
@@ -733,7 +730,6 @@ static struct shrinker dqcache_shrinker = {

 /*
  * Put reference to dquot
- * NOTE: If you change this function please check whether dqput_blocks() works right...
  */
 void dqput(struct dquot *dquot)
 {
@@ -963,46 +959,33 @@ static void add_dquot_ref(struct super_block *sb, int type)
 }

 /*
- * Return 0 if dqput() won't block.
- * (note that 1 doesn't necessarily mean blocking)
- */
-static inline int dqput_blocks(struct dquot *dquot)
-{
-	if (atomic_read(&dquot->dq_count) <= 1)
-		return 1;
-	return 0;
-}
-
-/*
  * Remove references to dquots from inode and add dquot to list for freeing
  * if we have the last reference to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
  */
-static int remove_inode_dquot_ref(struct inode *inode, int type,
-				  struct list_head *tofree_head)
+static void remove_inode_dquot_ref(struct inode *inode, int type,
+				   struct list_head *tofree_head)
 {
 	struct dquot *dquot = inode->i_dquot[type];

 	inode->i_dquot[type] = NULL;
-	if (dquot) {
-		if (dqput_blocks(dquot)) {
-#ifdef CONFIG_QUOTA_DEBUG
-			if (atomic_read(&dquot->dq_count) != 1)
-				quota_error(inode->i_sb, "Adding dquot with "
-					    "dq_count %d to dispose list",
-					    atomic_read(&dquot->dq_count));
-#endif
-			spin_lock(&dq_list_lock);
-			/* As dquot must have currently users it can't be on
-			 * the free list... */
-			list_add(&dquot->dq_free, tofree_head);
-			spin_unlock(&dq_list_lock);
-			return 1;
-		}
-		else
-			dqput(dquot);   /* We have guaranteed we won't block */
+	if (!dquot)
+		return;
+
+	if (list_empty(&dquot->dq_free)) {
+		/*
+		 * The inode still has reference to dquot so it can't be in the
+		 * free list
+		 */
+		spin_lock(&dq_list_lock);
+		list_add(&dquot->dq_free, tofree_head);
+		spin_unlock(&dq_list_lock);
+	} else {
+		/*
+		 * Dquot is already in a list to put so we won't drop the last
+		 * reference here.
+		 */
+		dqput(dquot);
 	}
-	return 0;
 }

 /*
@@ -1037,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 		 *  We have to scan also I_NEW inodes because they can already
 		 *  have quota pointer initialized. Luckily, we need to touch
 		 *  only quota pointers and these have separate locking
-		 *  (dqptr_sem).
+		 *  (dq_data_lock).
 		 */
+		spin_lock(&dq_data_lock);
 		if (!IS_NOQUOTA(inode)) {
 			if (unlikely(inode_get_rsv_space(inode) > 0))
 				reserved = 1;
 			remove_inode_dquot_ref(inode, type, tofree_head);
 		}
+		spin_unlock(&dq_data_lock);
 	}
 	spin_unlock(&inode_sb_list_lock);
 #ifdef CONFIG_QUOTA_DEBUG
@@ -1061,9 +1046,8 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 	LIST_HEAD(tofree_head);

 	if (sb->dq_op) {
-		down_write(&sb_dqopt(sb)->dqptr_sem);
 		remove_dquot_ref(sb, type, &tofree_head);
-		up_write(&sb_dqopt(sb)->dqptr_sem);
+		synchronize_srcu(&dquot_srcu);
 		put_dquot_list(&tofree_head);
 	}
 }
@@ -1394,21 +1378,16 @@ static int dquot_active(const struct inode *inode)
 /*
  * Initialize quota pointers in inode
  *
- * We do things in a bit complicated way but by that we avoid calling
- * dqget() and thus filesystem callbacks under dqptr_sem.
- *
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
 static void __dquot_initialize(struct inode *inode, int type)
 {
-	int cnt;
+	int cnt, init_needed = 0;
 	struct dquot *got[MAXQUOTAS];
 	struct super_block *sb = inode->i_sb;
 	qsize_t rsv;

-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return;

@@ -1418,6 +1397,15 @@ static void __dquot_initialize(struct inode *inode, int type)
 		got[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
+		/*
+		 * The i_dquot should have been initialized in most cases,
+		 * we check it without locking here to avoid unnecessary
+		 * dqget()/dqput() calls.
+		 */
+		if (inode->i_dquot[cnt])
+			continue;
+		init_needed = 1;
+
 		switch (cnt) {
 		case USRQUOTA:
 			qid = make_kqid_uid(inode->i_uid);
@@ -1429,7 +1417,11 @@ static void __dquot_initialize(struct inode *inode, int type)
 		got[cnt] = dqget(sb, qid);
 	}

-	down_write(&sb_dqopt(sb)->dqptr_sem);
+	/* All required i_dquot has been initialized */
+	if (!init_needed)
+		return;
+
+	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode))
 		goto out_err;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1449,15 +1441,12 @@ static void __dquot_initialize(struct inode *inode, int type)
 			 * did a write before quota was turned on
 			 */
 			rsv = inode_get_rsv_space(inode);
-			if (unlikely(rsv)) {
-				spin_lock(&dq_data_lock);
+			if (unlikely(rsv))
 				dquot_resv_space(inode->i_dquot[cnt], rsv);
-				spin_unlock(&dq_data_lock);
-			}
 		}
 	}
 out_err:
-	up_write(&sb_dqopt(sb)->dqptr_sem);
+	spin_unlock(&dq_data_lock);
 	/* Drop unused references */
 	dqput_all(got);
 }
@@ -1469,19 +1458,24 @@ void dquot_initialize(struct inode *inode)
 EXPORT_SYMBOL(dquot_initialize);

 /*
- * Release all quotas referenced by inode
+ * Release all quotas referenced by inode.
+ *
+ * This function only be called on inode free or converting
+ * a file to quota file, no other users for the i_dquot in
+ * both cases, so we needn't call synchronize_srcu() after
+ * clearing i_dquot.
  */
 static void __dquot_drop(struct inode *inode)
 {
 	int cnt;
 	struct dquot *put[MAXQUOTAS];

-	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		put[cnt] = inode->i_dquot[cnt];
 		inode->i_dquot[cnt] = NULL;
 	}
-	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	spin_unlock(&dq_data_lock);
 	dqput_all(put);
 }

@@ -1599,15 +1593,11 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
  */
 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 {
-	int cnt, ret = 0;
+	int cnt, ret = 0, index;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot **dquots = inode->i_dquot;
 	int reserve = flags & DQUOT_SPACE_RESERVE;

-	/*
-	 * First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex
-	 */
 	if (!dquot_active(inode)) {
 		inode_incr_space(inode, number, reserve);
 		goto out;
@@ -1616,7 +1606,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warn[cnt].w_type = QUOTA_NL_NOWARN;

-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!dquots[cnt])
@@ -1643,7 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 		goto out_flush_warn;
 	mark_all_dquot_dirty(dquots);
 out_flush_warn:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 out:
 	return ret;
@@ -1655,17 +1645,16 @@ EXPORT_SYMBOL(__dquot_alloc_space);
  */
 int dquot_alloc_inode(const struct inode *inode)
 {
-	int cnt, ret = 0;
+	int cnt, ret = 0, index;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots = inode->i_dquot;

-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return 0;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warn[cnt].w_type = QUOTA_NL_NOWARN;
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!dquots[cnt])
@@ -1685,7 +1674,7 @@ warn_put_all:
 	spin_unlock(&dq_data_lock);
 	if (ret == 0)
 		mark_all_dquot_dirty(dquots);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 	return ret;
 }
@@ -1696,14 +1685,14 @@ EXPORT_SYMBOL(dquot_alloc_inode);
  */
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
-	int cnt;
+	int cnt, index;

 	if (!dquot_active(inode)) {
 		inode_claim_rsv_space(inode, number);
 		return 0;
 	}

-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	/* Claim reserved quotas to allocated quotas */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1715,7 +1704,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 	inode_claim_rsv_space(inode, number);
 	spin_unlock(&dq_data_lock);
 	mark_all_dquot_dirty(inode->i_dquot);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	return 0;
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1725,14 +1714,14 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
  */
 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
 {
-	int cnt;
+	int cnt, index;

 	if (!dquot_active(inode)) {
 		inode_reclaim_rsv_space(inode, number);
 		return;
 	}

-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	/* Claim reserved quotas to allocated quotas */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1744,7 +1733,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
 	inode_reclaim_rsv_space(inode, number);
 	spin_unlock(&dq_data_lock);
 	mark_all_dquot_dirty(inode->i_dquot);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	return;
 }
 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
@@ -1757,16 +1746,14 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 	unsigned int cnt;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot **dquots = inode->i_dquot;
-	int reserve = flags & DQUOT_SPACE_RESERVE;
+	int reserve = flags & DQUOT_SPACE_RESERVE, index;

-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode)) {
 		inode_decr_space(inode, number, reserve);
 		return;
 	}

-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		int wtype;
@@ -1789,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 		goto out_unlock;
 	mark_all_dquot_dirty(dquots);
 out_unlock:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 }
 EXPORT_SYMBOL(__dquot_free_space);
@@ -1802,13 +1789,12 @@ void dquot_free_inode(const struct inode *inode)
 	unsigned int cnt;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots = inode->i_dquot;
+	int index;

-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return;

-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		int wtype;
@@ -1823,7 +1809,7 @@ void dquot_free_inode(const struct inode *inode)
 	}
 	spin_unlock(&dq_data_lock);
 	mark_all_dquot_dirty(dquots);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 }
 EXPORT_SYMBOL(dquot_free_inode);
@@ -1837,6 +1823,8 @@ EXPORT_SYMBOL(dquot_free_inode);
  * This operation can block, but only after everything is updated
  * A transaction must be started when entering this function.
  *
+ * We are holding reference on transfer_from & transfer_to, no need to
+ * protect them by srcu_read_lock().
  */
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 {
@@ -1849,8 +1837,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	struct dquot_warn warn_from_inodes[MAXQUOTAS];
 	struct dquot_warn warn_from_space[MAXQUOTAS];

-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return 0;
 	/* Initialize the arrays */
@@ -1859,12 +1845,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
 		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
 	}
-	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
-		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		spin_unlock(&dq_data_lock);
 		return 0;
 	}
-	spin_lock(&dq_data_lock);
 	cur_space = inode_get_bytes(inode);
 	rsv_space = inode_get_rsv_space(inode);
 	space = cur_space + rsv_space;
@@ -1918,7 +1904,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 		inode->i_dquot[cnt] = transfer_to[cnt];
 	}
 	spin_unlock(&dq_data_lock);
-	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);

 	mark_all_dquot_dirty(transfer_from);
 	mark_all_dquot_dirty(transfer_to);
@@ -1932,7 +1917,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	return 0;
 over_quota:
 	spin_unlock(&dq_data_lock);
-	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	flush_warnings(warn_to);
 	return ret;
 }
diff --git a/fs/quota/kqid.c b/fs/quota/kqid.c
index 2f97b0e2c501..ebc5e6285800 100644
--- a/fs/quota/kqid.c
+++ b/fs/quota/kqid.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(qid_lt);
 /**
  * from_kqid - Create a qid from a kqid user-namespace pair.
  * @targ: The user namespace we want a qid in.
- * @kuid: The kernel internal quota identifier to start with.
+ * @kqid: The kernel internal quota identifier to start with.
  *
  * Map @kqid into the user-namespace specified by @targ and
  * return the resulting qid.
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
index 72d29177998e..bb2869f5dfd8 100644
--- a/fs/quota/netlink.c
+++ b/fs/quota/netlink.c
@@ -32,8 +32,7 @@ static struct genl_family quota_genl_family = {

 /**
  * quota_send_warning - Send warning to userspace about exceeded quota
- * @type: The quota type: USRQQUOTA, GRPQUOTA,...
- * @id: The user or group id of the quota that was exceeded
+ * @qid: The kernel internal quota identifier.
  * @dev: The device on which the fs is mounted (sb->s_dev)
  * @warntype: The type of the warning: QUOTA_NL_...
  *
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index ff3f0b3cfdb3..75621649dbd7 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
 {
 	__u32 fmt;

-	down_read(&sb_dqopt(sb)->dqptr_sem);
+	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 	if (!sb_has_quota_active(sb, type)) {
-		up_read(&sb_dqopt(sb)->dqptr_sem);
+		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 		return -ESRCH;
 	}
 	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-	up_read(&sb_dqopt(sb)->dqptr_sem);
+	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 	if (copy_to_user(addr, &fmt, sizeof(fmt)))
 		return -EFAULT;
 	return 0;
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 5739cb99de7b..9c02d96d3a42 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 	return 0;
 }

-static void balance_leaf_insert_left(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
+					     struct item_head *const ih,
+					     const char * const body)
 {
 	int ret;
 	struct buffer_info bi;
 	int n = B_NR_ITEMS(tb->L[0]);
+	unsigned body_shift_bytes = 0;

 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
 		/* part of new item falls into L[0] */
@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
 		put_ih_item_len(ih, new_item_len);
 		if (tb->lbytes > tb->zeroes_num) {
-			body += (tb->lbytes - tb->zeroes_num);
+			body_shift_bytes = tb->lbytes - tb->zeroes_num;
 			tb->zeroes_num = 0;
 		} else
 			tb->zeroes_num -= tb->lbytes;
@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
 		tb->insert_size[0] = 0;
 		tb->zeroes_num = 0;
 	}
+	return body_shift_bytes;
 }

 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
-						 struct item_head *ih,
-						 const char *body)
+						 struct item_head * const ih,
+						 const char * const body)
 {
 	int n = B_NR_ITEMS(tb->L[0]);
 	struct buffer_info bi;
@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
 	tb->pos_in_item -= tb->lbytes;
 }

-static void balance_leaf_paste_left_shift(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body)
+static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
+						  struct item_head * const ih,
+						  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tb->L[0]);
 	struct buffer_info bi;
+	int body_shift_bytes = 0;

 	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
 		balance_leaf_paste_left_shift_dirent(tb, ih, body);
-		return;
+		return 0;
 	}

 	RFALSE(tb->lbytes <= 0,
@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
 		 * insert_size[0]
 		 */
 		if (l_n > tb->zeroes_num) {
-			body += (l_n - tb->zeroes_num);
+			body_shift_bytes = l_n - tb->zeroes_num;
 			tb->zeroes_num = 0;
 		} else
 			tb->zeroes_num -= l_n;
@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
 		 */
 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
 	}
+	return body_shift_bytes;
 }


 /* appended item will be in L[0] in whole */
 static void balance_leaf_paste_left_whole(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body)
+					  struct item_head * const ih,
+					  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tb->L[0]);
@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb,
 	tb->zeroes_num = 0;
 }

-static void balance_leaf_paste_left(struct tree_balance *tb,
-				    struct item_head *ih, const char *body)
+static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
+					    struct item_head * const ih,
+					    const char * const body)
 {
 	/* we must shift the part of the appended item */
 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
-		balance_leaf_paste_left_shift(tb, ih, body);
+		return balance_leaf_paste_left_shift(tb, ih, body);
 	else
 		balance_leaf_paste_left_whole(tb, ih, body);
+	return 0;
 }

 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
-static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
-			      const char *body, int flag)
+static unsigned int balance_leaf_left(struct tree_balance *tb,
+				      struct item_head * const ih,
+				      const char * const body, int flag)
 {
 	if (tb->lnum[0] <= 0)
-		return;
+		return 0;

 	/* new item or it part falls to L[0], shift it too */
 	if (tb->item_pos < tb->lnum[0]) {
 		BUG_ON(flag != M_INSERT && flag != M_PASTE);

 		if (flag == M_INSERT)
-			balance_leaf_insert_left(tb, ih, body);
+			return balance_leaf_insert_left(tb, ih, body);
 		else /* M_PASTE */
-			balance_leaf_paste_left(tb, ih, body);
+			return balance_leaf_paste_left(tb, ih, body);
 	} else
 		/* new item doesn't fall into L[0] */
 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+	return 0;
 }


 static void balance_leaf_insert_right(struct tree_balance *tb,
-				      struct item_head *ih, const char *body)
+				      struct item_head * const ih,
+				      const char * const body)
 {

 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb,


 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
 }

 static void balance_leaf_paste_right_shift(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n_shift, n_rem, r_zeroes_number, version;
@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb,
 }

 static void balance_leaf_paste_right_whole(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
 }

 static void balance_leaf_paste_right(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb,
 }

 /* shift rnum[0] items from S[0] to the right neighbor R[0] */
-static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
-			       const char *body, int flag)
+static void balance_leaf_right(struct tree_balance *tb,
+			       struct item_head * const ih,
+			       const char * const body, int flag)
 {
 	if (tb->rnum[0] <= 0)
 		return;
@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
 }

 static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body,
+					  struct item_head * const ih,
+					  const char * const body,
 					  struct item_head *insert_key,
 					  struct buffer_head **insert_ptr,
 					  int i)
@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,

 /* we append to directory item */
 static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
-					 struct item_head *ih,
-					 const char *body,
+					 struct item_head * const ih,
+					 const char * const body,
 					 struct item_head *insert_key,
 					 struct buffer_head **insert_ptr,
 					 int i)
@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
-					 struct item_head *ih,
-					 const char *body,
+					 struct item_head * const ih,
+					 const char * const body,
 					 struct item_head *insert_key,
 					 struct buffer_head **insert_ptr,
 					 int i)
@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
-					       struct item_head *ih,
-					       const char *body,
+					       struct item_head * const ih,
+					       const char * const body,
 					       struct item_head *insert_key,
 					       struct buffer_head **insert_ptr,
 					       int i)
@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
 }

 static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
-					 struct item_head *ih,
-					 const char *body,
+					 struct item_head * const ih,
+					 const char * const body,
 					 struct item_head *insert_key,
 					 struct buffer_head **insert_ptr,
 					 int i)
@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb,

 /* Fill new nodes that appear in place of S[0] */
 static void balance_leaf_new_nodes(struct tree_balance *tb,
-				   struct item_head *ih,
-				   const char *body,
+				   struct item_head * const ih,
+				   const char * const body,
 				   struct item_head *insert_key,
 				   struct buffer_head **insert_ptr,
 				   int flag)
@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb,
 }

 static void balance_leaf_finish_node_insert(struct tree_balance *tb,
-					    struct item_head *ih,
-					    const char *body)
+					    struct item_head * const ih,
+					    const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb,
 }

 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
-						  struct item_head *ih,
-						  const char *body)
+						  struct item_head * const ih,
+						  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct item_head *pasted = item_head(tbS0, tb->item_pos);
@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
 }

 static void balance_leaf_finish_node_paste(struct tree_balance *tb,
-					   struct item_head *ih,
-					   const char *body)
+					   struct item_head * const ih,
+					   const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb,
  * of the affected item which remains in S
  */
 static void balance_leaf_finish_node(struct tree_balance *tb,
-				     struct item_head *ih,
-				     const char *body, int flag)
+				     struct item_head * const ih,
+				     const char * const body, int flag)
 {
 	/* if we must insert or append into buffer S[0] */
 	if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
 	    && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
 		tb->pos_in_item *= UNFM_P_SIZE;

-	balance_leaf_left(tb, ih, body, flag);
+	body += balance_leaf_left(tb, ih, body, flag);

 	/* tb->lnum[0] > 0 */
 	/* Calculate new item position */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e8870de4627e..a88b1b3e7db3 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 		}
 	}

-	/* wait for all commits to finish */
-	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);

 	/*
 	 * We must release the write lock here because
@@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 	 */
 	reiserfs_write_unlock(sb);

+	/*
+	 * Cancel flushing of old commits. Note that neither of these works
+	 * will be requeued because superblock is being shutdown and doesn't
+	 * have MS_ACTIVE set.
+	 */
 	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
-	flush_workqueue(REISERFS_SB(sb)->commit_wq);
+	/* wait for all commits to finish */
+	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);

 	free_journal_ram(sb);

@@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
 	if (flush) {
 		flush_commit_list(sb, jl, 1);
 		flush_journal_list(sb, jl, 1);
-	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
-		queue_delayed_work(REISERFS_SB(sb)->commit_wq,
-				   &journal->j_work, HZ / 10);
+	} else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
+		/*
+		 * Avoid queueing work when sb is being shut down. Transaction
+		 * will be flushed on journal shutdown.
+		 */
+		if (sb->s_flags & MS_ACTIVE)
+			queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+					   &journal->j_work, HZ / 10);
+	}

 	/*
 	 * if the next transaction has any chance of wrapping, flush
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index 814dda3ec998..249594a821e0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,

 /* insert item into the leaf node in position before */
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-			  struct item_head *inserted_item_ih,
-			  const char *inserted_item_body, int zeros_number)
+			  struct item_head * const inserted_item_ih,
+			  const char * const inserted_item_body,
+			  int zeros_number)
 {
 	struct buffer_head *bh = bi->bi_bh;
 	int nr, free_space;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index bf53888c7f59..735c2c2b4536 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
 		       int del_num, int del_bytes);
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-			  struct item_head *inserted_item_ih,
-			  const char *inserted_item_body, int zeros_number);
-void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
-			  int pos_in_item, int paste_size, const char *body,
+			  struct item_head * const inserted_item_ih,
+			  const char * const inserted_item_body, int zeros_number);
+void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
+			  int pos_in_item, int paste_size,
+			  const char * const body,
 			  int zeros_number);
 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
 			  int pos_in_item, int cut_size);
 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 709ea92d716f..d46e88a33b02 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s)
 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 	unsigned long delay;

-	if (s->s_flags & MS_RDONLY)
+	/*
+	 * Avoid scheduling flush when sb is being shut down. It can race
+	 * with journal shutdown and free still queued delayed work.
+	 */
+	if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE))
 		return;

 	spin_lock(&sbi->old_work_lock);
diff --git a/fs/super.c b/fs/super.c
index a371ce6aa919..b9a214d2fe98 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -217,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
 	mutex_init(&s->s_dquot.dqio_mutex);
 	mutex_init(&s->s_dquot.dqonoff_mutex);
-	init_rwsem(&s->s_dquot.dqptr_sem);
 	s->s_maxbytes = MAX_NON_LFS;
 	s->s_op = &default_op;
 	s->s_time_gran = 1000000000;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index d80738fdf424..86c6743ec1fe 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -27,7 +27,7 @@

 #include "udfdecl.h"
 #include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/string.h> /* memset */
 #include <linux/capability.h>
@@ -100,24 +100,6 @@ static int udf_adinicb_write_begin(struct file *file,
 	return 0;
 }

-static int udf_adinicb_write_end(struct file *file,
-			struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned copied,
-			struct page *page, void *fsdata)
-{
-	struct inode *inode = mapping->host;
-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-	char *kaddr;
-	struct udf_inode_info *iinfo = UDF_I(inode);
-
-	kaddr = kmap_atomic(page);
-	memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
-		kaddr + offset, copied);
-	kunmap_atomic(kaddr);
-
-	return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
-}
-
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
 				     struct iov_iter *iter,
 				     loff_t offset)
@@ -130,7 +112,7 @@ const struct address_space_operations udf_adinicb_aops = {
 	.readpage	= udf_adinicb_readpage,
 	.writepage	= udf_adinicb_writepage,
 	.write_begin	= udf_adinicb_write_begin,
-	.write_end	= udf_adinicb_write_end,
+	.write_end	= simple_write_end,
 	.direct_IO	= udf_adinicb_direct_IO,
 };
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 6583fe9b0645..6ad5a453af97 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -21,7 +21,7 @@

 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>

 #include "udf_sb.h"
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3286db047a40..813da94d447b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -63,7 +63,7 @@
 #include "udf_i.h"

 #include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>

 #define VDS_POS_PRIMARY_VOL_DESC	0
 #define VDS_POS_UNALLOC_SPACE_DESC	1
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index d7c6dbe4194b..6fb7945c1e6e 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -20,7 +20,7 @@
  */

 #include "udfdecl.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/time.h>
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 44b815e57f94..afd470e588ff 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -412,7 +412,6 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
 	int extIndex = 0, newExtIndex = 0, hasExt = 0;
 	unsigned short valueCRC;
 	uint8_t curr;
-	const uint8_t hexChar[] = "0123456789ABCDEF";

 	if (udfName[0] == '.' &&
 	    (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
@@ -477,10 +476,10 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
 				newIndex = 250;
 			newName[newIndex++] = CRC_MARK;
 			valueCRC = crc_itu_t(0, fidName, fidNameLen);
-			newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
-			newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
-			newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
-			newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
+			newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
+			newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
+			newName[newIndex++] = hex_asc_upper_hi(valueCRC);
+			newName[newIndex++] = hex_asc_upper_lo(valueCRC);
 			if (hasExt) {
 				newName[newIndex++] = EXT_MARK;
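
For reference, the hex_asc_upper_{hi,lo} helpers adopted by the
udf/unicode.c patch above are the kernel's shared uppercase-hex table
accessors from include/linux/kernel.h (restated here from that header;
verify against your tree). They make the replacement byte-for-byte
equivalent to the removed local hexChar table:

	/* From include/linux/kernel.h:
	 *   extern const char hex_asc_upper[];   -- "0123456789ABCDEF"
	 *   #define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
	 *   #define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
	 *
	 * Equivalence for the 16-bit CRC used above:
	 *   hexChar[(valueCRC & 0xf000) >> 12]  ==  hex_asc_upper_hi(valueCRC >> 8)
	 *   hexChar[(valueCRC & 0x0f00) >>  8]  ==  hex_asc_upper_lo(valueCRC >> 8)
	 *   hexChar[(valueCRC & 0x00f0) >>  4]  ==  hex_asc_upper_hi(valueCRC)
	 *   hexChar[(valueCRC & 0x000f)      ]  ==  hex_asc_upper_lo(valueCRC)
	 */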