Diffstat (limited to 'fs')
-rw-r--r--  fs/befs/btree.c            |   2
-rw-r--r--  fs/befs/datastream.c       |   2
-rw-r--r--  fs/binfmt_elf.c            |   2
-rw-r--r--  fs/freevxfs/vxfs_dir.h     |   2
-rw-r--r--  fs/freevxfs/vxfs_immed.c   |   2
-rw-r--r--  fs/gfs2/recovery.c         |   2
-rw-r--r--  fs/jffs2/readinode.c       |   2
-rw-r--r--  fs/jfs/jfs_xtree.c         |   2
-rw-r--r--  fs/locks.c                 | 125
-rw-r--r--  fs/ncpfs/mmap.c            |   4
-rw-r--r--  fs/ocfs2/alloc.c           |   2
-rw-r--r--  fs/ocfs2/dir.c             |   2
-rw-r--r--  fs/ocfs2/ocfs1_fs_compat.h |   2
-rw-r--r--  fs/ocfs2/suballoc.c        |   2
-rw-r--r--  fs/reiserfs/bitmap.c       |   6
-rw-r--r--  fs/signalfd.c              |   2
16 files changed, 80 insertions, 81 deletions
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index af5bb93276f8..4202db7496cb 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -232,7 +232,7 @@ befs_bt_read_node(struct super_block *sb, befs_data_stream * ds,
  * @key: Key string to lookup in btree
  * @value: Value stored with @key
  *
- * On sucess, returns BEFS_OK and sets *@value to the value stored
+ * On success, returns BEFS_OK and sets *@value to the value stored
  * with @key (usually the disk block number of an inode).
  *
  * On failure, returns BEFS_ERR or BEFS_BT_NOT_FOUND.
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index aacb4da6298a..e3287d0d1a58 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -236,7 +236,7 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds)
 	as in the indirect region code).
 
 	When/if blockno is found, if blockno is inside of a block
-	run as stored on disk, we offset the start and lenght members
+	run as stored on disk, we offset the start and length members
 	of the block run, so that blockno is the start and len is
 	still valid (the run ends in the same place).
 
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 18ed6dd906c1..4628c42ca892 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -117,7 +117,7 @@ static int padzero(unsigned long elf_bss)
 	return 0;
 }
 
-/* Let's use some macros to make this stack manipulation a litle clearer */
+/* Let's use some macros to make this stack manipulation a little clearer */
 #ifdef CONFIG_STACK_GROWSUP
 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
 #define STACK_ROUND(sp, items) \
diff --git a/fs/freevxfs/vxfs_dir.h b/fs/freevxfs/vxfs_dir.h
index 3c96d6e63978..aaf1fb098639 100644
--- a/fs/freevxfs/vxfs_dir.h
+++ b/fs/freevxfs/vxfs_dir.h
@@ -41,7 +41,7 @@
  * VxFS directory block header.
  *
  * This entry is the head of every filesystem block in a directory.
- * It is used for free space managment and additionally includes
+ * It is used for free space management and additionally includes
  * a hash for speeding up directory search (lookup).
  *
  * The hash may be empty and in fact we do not use it all in the
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index 24b5a775ff96..8a5959a61ba9 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -54,7 +54,7 @@ const struct inode_operations vxfs_immed_symlink_iops = {
 };
 
 /*
- * Adress space operations for immed files and directories.
+ * Address space operations for immed files and directories.
  */
 const struct address_space_operations vxfs_immed_aops = {
 	.readpage =		vxfs_immed_readpage,
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index b249e294a95b..6fb07d67ca8a 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -450,7 +450,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
 		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
 			jd->jd_jid);
 
-		/* Aquire the journal lock so we can do recovery */
+		/* Acquire the journal lock so we can do recovery */
 
 		error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
 					  LM_ST_EXCLUSIVE,
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 2eae5d2dbebe..6c1ba3566f58 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -741,7 +741,7 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	 * are not obsolete.
 	 *
 	 * Of course, this optimization only makes sense in case
-	 * of NAND flashes (or other flashes whith
+	 * of NAND flashes (or other flashes with
 	 * !jffs2_can_mark_obsolete()), since on NOR flashes
 	 * nodes are marked obsolete physically.
 	 *
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 1543906a2e0d..a000aaa75136 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -3965,7 +3965,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
  *	xtTruncate_pmap()
  *
  * function:
- *	Perform truncate to zero lenghth for deleted file, leaving the
+ *	Perform truncate to zero length for deleted file, leaving the
  *	the xtree and working map untouched. This allows the file to
  *	be accessed via open file handles, while the delete of the file
  *	is committed to disk.
diff --git a/fs/locks.c b/fs/locks.c
index 8b8388eca05e..49354b9c7dc1 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -125,6 +125,7 @@
 #include <linux/syscalls.h>
 #include <linux/time.h>
 #include <linux/rcupdate.h>
+#include <linux/pid_namespace.h>
 
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -185,6 +186,7 @@ void locks_init_lock(struct file_lock *fl)
 	fl->fl_fasync = NULL;
 	fl->fl_owner = NULL;
 	fl->fl_pid = 0;
+	fl->fl_nspid = NULL;
 	fl->fl_file = NULL;
 	fl->fl_flags = 0;
 	fl->fl_type = 0;
@@ -553,6 +555,8 @@ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
 {
 	list_add(&fl->fl_link, &file_lock_list);
 
+	fl->fl_nspid = get_pid(task_tgid(current));
+
 	/* insert into file's list */
 	fl->fl_next = *pos;
 	*pos = fl;
@@ -584,6 +588,11 @@ static void locks_delete_lock(struct file_lock **thisfl_p)
 	if (fl->fl_ops && fl->fl_ops->fl_remove)
 		fl->fl_ops->fl_remove(fl);
 
+	if (fl->fl_nspid) {
+		put_pid(fl->fl_nspid);
+		fl->fl_nspid = NULL;
+	}
+
 	locks_wake_up_blocks(fl);
 	locks_free_lock(fl);
 }
@@ -634,33 +643,6 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
 	return (locks_conflict(caller_fl, sys_fl));
 }
 
-static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
-{
-	int result = 0;
-	DECLARE_WAITQUEUE(wait, current);
-
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(fl_wait, &wait);
-	if (timeout == 0)
-		schedule();
-	else
-		result = schedule_timeout(timeout);
-	if (signal_pending(current))
-		result = -ERESTARTSYS;
-	remove_wait_queue(fl_wait, &wait);
-	__set_current_state(TASK_RUNNING);
-	return result;
-}
-
-static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
-{
-	int result;
-	locks_insert_block(blocker, waiter);
-	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
-	__locks_delete_block(waiter);
-	return result;
-}
-
 void
 posix_test_lock(struct file *filp, struct file_lock *fl)
 {
@@ -673,55 +655,67 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
 		if (posix_locks_conflict(fl, cfl))
 			break;
 	}
-	if (cfl)
+	if (cfl) {
 		__locks_copy_lock(fl, cfl);
-	else
+		if (cfl->fl_nspid)
+			fl->fl_pid = pid_nr_ns(cfl->fl_nspid,
+					       task_active_pid_ns(current));
+	} else
 		fl->fl_type = F_UNLCK;
 	unlock_kernel();
 	return;
 }
-
 EXPORT_SYMBOL(posix_test_lock);
 
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
  *
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock. That lock in turns may be held by
+ * someone waiting for at most one other lock. Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
  *
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
  *
- * However, the failure of this assumption (also possible in the case of
- * multiple tasks sharing the same open file table) also means there's no
- * guarantee that the loop below will terminate. As a hack, we give up
- * after a few iterations.
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ *
+ * To handle those cases, we just bail out after a few iterations.
  */
 
 #define MAX_DEADLK_ITERATIONS 10
 
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+{
+	struct file_lock *fl;
+
+	list_for_each_entry(fl, &blocked_list, fl_link) {
+		if (posix_same_owner(fl, block_fl))
+			return fl->fl_next;
+	}
+	return NULL;
+}
+
 static int posix_locks_deadlock(struct file_lock *caller_fl,
 				struct file_lock *block_fl)
 {
-	struct file_lock *fl;
 	int i = 0;
 
-next_task:
-	if (posix_same_owner(caller_fl, block_fl))
-		return 1;
-	list_for_each_entry(fl, &blocked_list, fl_link) {
-		if (posix_same_owner(fl, block_fl)) {
-			if (i++ > MAX_DEADLK_ITERATIONS)
-				return 0;
-			fl = fl->fl_next;
-			block_fl = fl;
-			goto next_task;
-		}
+	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+		if (i++ > MAX_DEADLK_ITERATIONS)
+			return 0;
+		if (posix_same_owner(caller_fl, block_fl))
+			return 1;
 	}
 	return 0;
 }
@@ -1256,7 +1250,10 @@ restart:
 		if (break_time == 0)
 			break_time++;
 	}
-	error = locks_block_on_timeout(flock, new_fl, break_time);
+	locks_insert_block(flock, new_fl);
+	error = wait_event_interruptible_timeout(new_fl->fl_wait,
+						!new_fl->fl_next, break_time);
+	__locks_delete_block(new_fl);
 	if (error >= 0) {
 		if (error == 0)
 			time_out_leases(inode);
@@ -2084,6 +2081,12 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
 							int id, char *pfx)
 {
 	struct inode *inode = NULL;
+	unsigned int fl_pid;
+
+	if (fl->fl_nspid)
+		fl_pid = pid_nr_ns(fl->fl_nspid, task_active_pid_ns(current));
+	else
+		fl_pid = fl->fl_pid;
 
 	if (fl->fl_file != NULL)
 		inode = fl->fl_file->f_path.dentry->d_inode;
@@ -2124,16 +2127,16 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
 	}
 	if (inode) {
 #ifdef WE_CAN_BREAK_LSLK_NOW
-		seq_printf(f, "%d %s:%ld ", fl->fl_pid,
+		seq_printf(f, "%d %s:%ld ", fl_pid,
 				inode->i_sb->s_id, inode->i_ino);
 #else
 		/* userspace relies on this representation of dev_t ;-( */
-		seq_printf(f, "%d %02x:%02x:%ld ", fl->fl_pid,
+		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
 				MAJOR(inode->i_sb->s_dev),
 				MINOR(inode->i_sb->s_dev), inode->i_ino);
 #endif
 	} else {
-		seq_printf(f, "%d <none>:0 ", fl->fl_pid);
+		seq_printf(f, "%d <none>:0 ", fl_pid);
 	}
 	if (IS_POSIX(fl)) {
 		if (fl->fl_end == OFFSET_MAX)
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index a94473d3072c..5d8dcb9ee326 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -50,10 +50,6 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
 
 	pos = vmf->pgoff << PAGE_SHIFT;
 	count = PAGE_SIZE;
-	if ((unsigned long)vmf->virtual_address + PAGE_SIZE > area->vm_end) {
-		WARN_ON(1); /* shouldn't happen? */
-		count = area->vm_end - (unsigned long)vmf->virtual_address;
-	}
 	/* what we can read in one go */
 	bufsize = NCP_SERVER(inode)->buffer_size;
 
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index e6df06ac6405..64713e149e46 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -3338,7 +3338,7 @@ static int ocfs2_insert_path(struct inode *inode,
 	if (insert->ins_split != SPLIT_NONE) {
 		/*
 		 * We could call ocfs2_insert_at_leaf() for some types
-		 * of splits, but it's easier to just let one seperate
+		 * of splits, but it's easier to just let one separate
 		 * function sort it all out.
 		 */
 		ocfs2_split_record(inode, left_path, right_path,
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 6b0107f21344..e280833ceb9a 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1215,7 +1215,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
 	down_write(&oi->ip_alloc_sem);
 
 	/*
-	 * Prepare for worst case allocation scenario of two seperate
+	 * Prepare for worst case allocation scenario of two separate
 	 * extents.
 	 */
 	if (alloc == 2)
diff --git a/fs/ocfs2/ocfs1_fs_compat.h b/fs/ocfs2/ocfs1_fs_compat.h
index 0b499bccec5a..dfb313bda5dd 100644
--- a/fs/ocfs2/ocfs1_fs_compat.h
+++ b/fs/ocfs2/ocfs1_fs_compat.h
@@ -77,7 +77,7 @@ struct ocfs1_disk_lock
 {
 /*00*/	__u32 curr_master;
 	__u8 file_lock;
-	__u8 compat_pad[3];	/* Not in orignal definition. Used to
+	__u8 compat_pad[3];	/* Not in original definition. Used to
 				   make the already existing alignment
 				   explicit */
 	__u64 last_write_time;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 7e397e2c25dd..72c198a004df 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -646,7 +646,7 @@ bail:
  * sync-data inodes."
  *
  * Note: OCFS2 already does this differently for metadata vs data
- * allocations, as those bitmaps are seperate and undo access is never
+ * allocations, as those bitmaps are separate and undo access is never
  * called on a metadata group descriptor.
  */
 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 16b331dd9913..f491ceb5af02 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -272,7 +272,7 @@ static inline int block_group_used(struct super_block *s, u32 id)
 
 	/* If we don't have cached information on this bitmap block, we're
 	 * going to have to load it later anyway. Loading it here allows us
-	 * to make a better decision. This favors long-term performace gain
+	 * to make a better decision. This favors long-term performance gain
 	 * with a better on-disk layout vs. a short term gain of skipping the
 	 * read and potentially having a bad placement. */
 	if (info->free_count == UINT_MAX) {
@@ -663,7 +663,7 @@ static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
 
 /*
  * Relocation based on dirid, hashing them into a given bitmap block
- * files. Formatted nodes are unaffected, a seperate policy covers them
+ * files. Formatted nodes are unaffected, a separate policy covers them
 */
 static void dirid_groups(reiserfs_blocknr_hint_t * hint)
 {
@@ -688,7 +688,7 @@ static void dirid_groups(reiserfs_blocknr_hint_t * hint)
 
 /*
  * Relocation based on oid, hashing them into a given bitmap block
- * files. Formatted nodes are unaffected, a seperate policy covers them
+ * files. Formatted nodes are unaffected, a separate policy covers them
 */
 static void oid_groups(reiserfs_blocknr_hint_t * hint)
 {
diff --git a/fs/signalfd.c b/fs/signalfd.c
index fb7f7e8034df..2d3e107da2d3 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -66,7 +66,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 	BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);
 
 	/*
-	 * Unused memebers should be zero ...
+	 * Unused members should be zero ...
 	 */
 	err = __clear_user(uinfo, sizeof(*uinfo));
 