author     Steven Whitehouse <swhiteho@redhat.com>    2008-06-04 15:06:21 +0100
committer  Steven Whitehouse <swhiteho@redhat.com>    2008-06-27 09:39:50 +0100
commit     31fcba00fe7145527b159f8893ec6c9cc61309fd (patch)
tree       6fef1dc1b20166b881356159d9ac8208a9335978
parent     b2cad26cfc2091050574a460b304ed103a35dbda (diff)
[GFS2] Remove all_list from lock_dlm
I discovered that we had a list onto which every lock_dlm lock was being
put. Its only function was to discover whether we'd got any locks left
after umount. Since there was already a counter for that purpose as well,
I removed the list. The saving is sizeof(struct list_head) per glock -
well worth having.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
-rw-r--r--  fs/gfs2/locking/dlm/lock.c       23
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h    2
-rw-r--r--  fs/gfs2/locking/dlm/mount.c       6
3 files changed, 1 insertion, 30 deletions
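
The idea behind the patch, reduced to a minimal user-space sketch (the demo_* names are hypothetical, not the kernel code): outstanding locks are tracked only by a counter, and unmount asserts that the counter has reached zero instead of walking a list of every lock.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ls {
        unsigned int all_locks_count;   /* plays the role of gdlm_ls->all_locks_count */
};

struct demo_lock {
        int id;
};

static struct demo_lock *demo_create_lock(struct demo_ls *ls, int id)
{
        struct demo_lock *lp = malloc(sizeof(*lp));

        if (!lp)
                return NULL;
        lp->id = id;
        ls->all_locks_count++;          /* before the patch: also list_add(&lp->all_list, ...) */
        return lp;
}

static void demo_delete_lock(struct demo_ls *ls, struct demo_lock *lp)
{
        ls->all_locks_count--;          /* before the patch: also list_del_init(&lp->all_list) */
        free(lp);
}

static void demo_unmount(struct demo_ls *ls)
{
        /* Equivalent of the new BUG_ON(ls->all_locks_count): a leftover
         * lock at unmount is a bug, so there is nothing useful to walk
         * and free. */
        assert(ls->all_locks_count == 0);
}

int main(void)
{
        struct demo_ls ls = { 0 };
        struct demo_lock *lp = demo_create_lock(&ls, 1);

        demo_delete_lock(&ls, lp);
        demo_unmount(&ls);
        printf("locks outstanding at unmount: %u\n", ls.all_locks_count);
        return 0;
}

The diffs below make the same trade in the kernel code: the list_add()/list_del_init() pair and the unmount-time walk disappear, while the existing all_locks_count keeps the only information that was still needed.
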
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index 894df4567a0..2482c904750 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -58,9 +58,6 @@ static void gdlm_delete_lp(struct gdlm_lock *lp)
spin_lock(&ls->async_lock);
if (!list_empty(&lp->delay_list))
list_del_init(&lp->delay_list);
- gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- list_del_init(&lp->all_list);
ls->all_locks_count--;
spin_unlock(&ls->async_lock);
@@ -397,7 +394,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
INIT_LIST_HEAD(&lp->delay_list);
spin_lock(&ls->async_lock);
- list_add(&lp->all_list, &ls->all_locks);
ls->all_locks_count++;
spin_unlock(&ls->async_lock);
@@ -710,22 +706,3 @@ void gdlm_submit_delayed(struct gdlm_ls *ls)
wake_up(&ls->thread_wait);
}
-int gdlm_release_all_locks(struct gdlm_ls *ls)
-{
- struct gdlm_lock *lp, *safe;
- int count = 0;
-
- spin_lock(&ls->async_lock);
- list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
- list_del_init(&lp->all_list);
-
- if (lp->lvb && lp->lvb != junk_lvb)
- kfree(lp->lvb);
- kfree(lp);
- count++;
- }
- spin_unlock(&ls->async_lock);
-
- return count;
-}
-
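
The hunks above keep the counter updates under ls->async_lock. A user-space analogue of that discipline (hypothetical sketch, using a pthread spinlock in place of the kernel spinlock):

#include <pthread.h>
#include <stdio.h>

struct demo_ls {
        pthread_spinlock_t async_lock;  /* stands in for ls->async_lock */
        unsigned int all_locks_count;
};

static void demo_lock_created(struct demo_ls *ls)
{
        pthread_spin_lock(&ls->async_lock);
        ls->all_locks_count++;          /* list_add() is no longer needed here */
        pthread_spin_unlock(&ls->async_lock);
}

static void demo_lock_deleted(struct demo_ls *ls)
{
        pthread_spin_lock(&ls->async_lock);
        ls->all_locks_count--;          /* nor is list_del_init() here */
        pthread_spin_unlock(&ls->async_lock);
}

int main(void)
{
        struct demo_ls ls;

        pthread_spin_init(&ls.async_lock, PTHREAD_PROCESS_PRIVATE);
        ls.all_locks_count = 0;

        demo_lock_created(&ls);
        demo_lock_deleted(&ls);
        printf("outstanding locks: %u\n", ls.all_locks_count);

        pthread_spin_destroy(&ls.async_lock);
        return 0;
}
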
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 845a27fd303..21cf46617d9 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -74,7 +74,6 @@ struct gdlm_ls {
spinlock_t async_lock;
struct list_head delayed;
struct list_head submit;
- struct list_head all_locks;
u32 all_locks_count;
wait_queue_head_t wait_control;
struct task_struct *thread;
@@ -112,7 +111,6 @@ struct gdlm_lock {
unsigned long flags; /* lock_dlm flags LFL_ */
struct list_head delay_list; /* delayed */
- struct list_head all_list; /* all locks for the fs */
struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
};
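
This struct change is where the per-glock saving quoted in the commit message comes from; struct list_head is just a pair of pointers, so a quick stand-alone check (assuming a typical LP64 build) gives 16 bytes per glock:

#include <stdio.h>

/* Same layout as the kernel's struct list_head (include/linux/list.h). */
struct list_head {
        struct list_head *next, *prev;
};

int main(void)
{
        /* Prints 16 on a typical 64-bit build, 8 on 32-bit. */
        printf("sizeof(struct list_head) = %zu bytes\n",
               sizeof(struct list_head));
        return 0;
}
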
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index fa31c54c2e6..947e70673ee 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -28,7 +28,6 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
spin_lock_init(&ls->async_lock);
INIT_LIST_HEAD(&ls->delayed);
INIT_LIST_HEAD(&ls->submit);
- INIT_LIST_HEAD(&ls->all_locks);
init_waitqueue_head(&ls->thread_wait);
init_waitqueue_head(&ls->wait_control);
ls->jid = -1;
@@ -173,7 +172,6 @@ out:
static void gdlm_unmount(void *lockspace)
{
struct gdlm_ls *ls = lockspace;
- int rv;
log_debug("unmount flags %lx", ls->flags);
@@ -187,9 +185,7 @@ static void gdlm_unmount(void *lockspace)
gdlm_kobject_release(ls);
dlm_release_lockspace(ls->dlm_lockspace, 2);
gdlm_release_threads(ls);
- rv = gdlm_release_all_locks(ls);
- if (rv)
- log_info("gdlm_unmount: %d stray locks freed", rv);
+ BUG_ON(ls->all_locks_count);
out:
kfree(ls);
}