Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	91
1 file changed, 25 insertions, 66 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2507fe34cb..34540f9d01 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -166,45 +166,19 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
return true;
}
-static void __gfs2_glock_free(struct gfs2_glock *gl)
+void gfs2_glock_free(struct gfs2_glock *gl)
{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
-}
-
-void gfs2_glock_free(struct gfs2_glock *gl) {
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- __gfs2_glock_free(gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_kill_wait);
-}
-
-void gfs2_glock_free_later(struct gfs2_glock *gl) {
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- spin_lock(&lru_lock);
- list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
- spin_unlock(&lru_lock);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_kill_wait);
}
-static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
-{
- struct list_head *list = &sdp->sd_dead_glocks;
-
- while(!list_empty(list)) {
- struct gfs2_glock *gl;
-
- gl = list_first_entry(list, struct gfs2_glock, gl_lru);
- list_del_init(&gl->gl_lru);
- __gfs2_glock_free(gl);
- }
-}
-
/**
* gfs2_glock_hold() - increment reference count on glock
* @gl: The glock to hold
@@ -617,6 +591,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
+ spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
@@ -664,6 +639,7 @@ retry:
gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
+ spin_unlock(&gl->gl_lockref.lock);
return;
}
@@ -686,6 +662,7 @@ retry:
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -713,7 +690,6 @@ __acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct lm_lockstruct *ls = &sdp->sd_lockstruct;
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
@@ -742,9 +718,6 @@ __acquires(&gl->gl_lockref.lock)
(gl->gl_state == LM_ST_EXCLUSIVE) ||
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags);
- if (!glops->go_inval && !glops->go_sync)
- goto skip_inval;
-
spin_unlock(&gl->gl_lockref.lock);
if (glops->go_sync) {
ret = glops->go_sync(gl);
@@ -757,7 +730,6 @@ __acquires(&gl->gl_lockref.lock)
fs_err(sdp, "Error %d syncing glock \n", ret);
gfs2_dump_glock(NULL, gl, true);
}
- spin_lock(&gl->gl_lockref.lock);
goto skip_inval;
}
}
@@ -778,10 +750,9 @@ __acquires(&gl->gl_lockref.lock)
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
- spin_lock(&gl->gl_lockref.lock);
skip_inval:
- gl->gl_lockref.count++;
+ gfs2_glock_hold(gl);
/*
* Check for an error encountered since we called go_sync and go_inval.
* If so, we can't withdraw from the glock code because the withdraw
@@ -823,37 +794,31 @@ skip_inval:
*/
clear_bit(GLF_LOCK, &gl->gl_flags);
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
- __gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
- return;
+ gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+ goto out;
} else {
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
- if (ls->ls_ops->lm_lock) {
- spin_unlock(&gl->gl_lockref.lock);
- ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
- spin_lock(&gl->gl_lockref.lock);
-
+ if (sdp->sd_lockstruct.ls_ops->lm_lock) {
+ /* lock_dlm */
+ ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
- test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
- /*
- * The lockspace has been released and the lock has
- * been unlocked implicitly.
- */
+ test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
+ finish_xmote(gl, target);
+ gfs2_glock_queue_work(gl, 0);
} else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
- target = gl->gl_state | LM_OUT_ERROR;
- } else {
- /* The operation will be completed asynchronously. */
- return;
+ GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
}
+ } else { /* lock_nolock */
+ finish_xmote(gl, target);
+ gfs2_glock_queue_work(gl, 0);
}
-
- /* Complete the operation now. */
- finish_xmote(gl, target);
- __gfs2_glock_queue_work(gl, 0);
+out:
+ spin_lock(&gl->gl_lockref.lock);
}
/**
@@ -1106,12 +1071,11 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- spin_lock(&gl->gl_lockref.lock);
- if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
- clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
+ spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2184,11 +2148,8 @@ static void thaw_glock(struct gfs2_glock *gl)
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
-
- spin_lock(&gl->gl_lockref.lock);
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- __gfs2_glock_queue_work(gl, 0);
- spin_unlock(&gl->gl_lockref.lock);
+ gfs2_glock_queue_work(gl, 0);
}
/**
@@ -2264,8 +2225,6 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
- gfs2_lm_unmount(sdp);
- gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
}