| author    | Paolo Bonzini <pbonzini@redhat.com>              | 2016-10-27 12:49:08 +0200 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Fam Zheng <famz@redhat.com>                      | 2016-10-28 21:50:18 +0800 |
| commit    | 3fe71223374e71436d4aced8865e50fd36588ff7 (patch) |                           |
| tree      | 204c0194ef3987e0eb94eda5c2646bebd27313ca         |                           |
| parent    | feadec63846d569829a4302486025b2915228712 (diff)  |                           |
aio: convert from RFifoLock to QemuRecMutex
It is simpler and a bit faster, and QEMU does not need the contention
callbacks (and thus the fairness) anymore.
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1477565348-5458-21-git-send-email-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
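The change is mechanical for callers: aio_context_acquire() and aio_context_release() keep their nesting behaviour, because QemuRecMutex is recursive just as RFifoLock was; only the FIFO fairness and the (unused) contention callback go away. Below is a minimal sketch of that property — not part of this patch, with a hypothetical MiniAioContext standing in for AioContext, and only the qemu_rec_mutex_*() calls taken from the diff further down; building it assumes QEMU's include paths.

```c
#include "qemu/osdep.h"
#include "qemu/thread.h"   /* declares QemuRecMutex and the qemu_rec_mutex_*() helpers */

/* Hypothetical stand-in for AioContext, reduced to the one field this patch touches. */
typedef struct {
    QemuRecMutex lock;
} MiniAioContext;

/* Mirrors what aio_context_acquire()/aio_context_release() now do. */
static void mini_acquire(MiniAioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

static void mini_release(MiniAioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

int main(void)
{
    MiniAioContext ctx;

    qemu_rec_mutex_init(&ctx.lock);

    /* Nested acquisition must not deadlock; RFifoLock allowed this and
     * QemuRecMutex preserves it. */
    mini_acquire(&ctx);
    mini_acquire(&ctx);
    mini_release(&ctx);
    mini_release(&ctx);

    qemu_rec_mutex_destroy(&ctx.lock);
    return 0;
}
```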
Diffstat:

| mode       | file                     | lines changed |
|------------|--------------------------|---------------|
| -rw-r--r-- | async.c                  |             8 |
| -rw-r--r-- | include/block/aio.h      |             3 |
| -rw-r--r-- | include/qemu/rfifolock.h |            54 |
| -rw-r--r-- | tests/.gitignore         |             1 |
| -rw-r--r-- | tests/Makefile.include   |             2 |
| -rw-r--r-- | tests/test-rfifolock.c   |            91 |
| -rw-r--r-- | util/Makefile.objs       |             1 |
| -rw-r--r-- | util/rfifolock.c         |            78 |
8 files changed, 5 insertions, 233 deletions
diff --git a/async.c b/async.c
--- a/async.c
+++ b/async.c
@@ -284,7 +284,7 @@ aio_ctx_finalize(GSource *source)
 
     aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
-    rfifolock_destroy(&ctx->lock);
+    qemu_rec_mutex_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
     timerlistgroup_deinit(&ctx->tlg);
 }
@@ -372,7 +372,7 @@ AioContext *aio_context_new(Error **errp)
 #endif
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
-    rfifolock_init(&ctx->lock, NULL, NULL);
+    qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
     return ctx;
@@ -393,10 +393,10 @@ void aio_context_unref(AioContext *ctx)
 
 void aio_context_acquire(AioContext *ctx)
 {
-    rfifolock_lock(&ctx->lock);
+    qemu_rec_mutex_lock(&ctx->lock);
 }
 
 void aio_context_release(AioContext *ctx)
 {
-    rfifolock_unlock(&ctx->lock);
+    qemu_rec_mutex_unlock(&ctx->lock);
 }
diff --git a/include/block/aio.h b/include/block/aio.h
index 8e1885ba58..c7ae27c91c 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -18,7 +18,6 @@
 #include "qemu/queue.h"
 #include "qemu/event_notifier.h"
 #include "qemu/thread.h"
-#include "qemu/rfifolock.h"
 #include "qemu/timer.h"
 
 typedef struct BlockAIOCB BlockAIOCB;
@@ -54,7 +53,7 @@ struct AioContext {
     GSource source;
 
     /* Protects all fields from multi-threaded access */
-    RFifoLock lock;
+    QemuRecMutex lock;
 
     /* The list of registered AIO handlers */
     QLIST_HEAD(, AioHandler) aio_handlers;
diff --git a/include/qemu/rfifolock.h b/include/qemu/rfifolock.h
deleted file mode 100644
index b23ab538a6..0000000000
--- a/include/qemu/rfifolock.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Recursive FIFO lock
- *
- * Copyright Red Hat, Inc. 2013
- *
- * Authors:
- *   Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_RFIFOLOCK_H
-#define QEMU_RFIFOLOCK_H
-
-#include "qemu/thread.h"
-
-/* Recursive FIFO lock
- *
- * This lock provides more features than a plain mutex:
- *
- * 1. Fairness - enforces FIFO order.
- * 2. Nesting - can be taken recursively.
- * 3. Contention callback - optional, called when thread must wait.
- *
- * The recursive FIFO lock is heavyweight so prefer other synchronization
- * primitives if you do not need its features.
- */
-typedef struct {
-    QemuMutex lock;             /* protects all fields */
-
-    /* FIFO order */
-    unsigned int head;          /* active ticket number */
-    unsigned int tail;          /* waiting ticket number */
-    QemuCond cond;              /* used to wait for our ticket number */
-
-    /* Nesting */
-    QemuThread owner_thread;    /* thread that currently has ownership */
-    unsigned int nesting;       /* amount of nesting levels */
-
-    /* Contention callback */
-    void (*cb)(void *);         /* called when thread must wait, with ->lock
-                                 * held so it may not recursively lock/unlock
-                                 */
-    void *cb_opaque;
-} RFifoLock;
-
-void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque);
-void rfifolock_destroy(RFifoLock *r);
-void rfifolock_lock(RFifoLock *r);
-void rfifolock_unlock(RFifoLock *r);
-
-#endif /* QEMU_RFIFOLOCK_H */
diff --git a/tests/.gitignore b/tests/.gitignore
index 64e050e859..c0d7857538 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -67,7 +67,6 @@ test-qmp-marshal.c
 test-qobject-output-visitor
 test-rcu-list
 test-replication
-test-rfifolock
 test-string-input-visitor
 test-string-output-visitor
 test-thread-pool
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 22656eaf26..04108df53a 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -45,7 +45,6 @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
 check-unit-y += tests/test-iov$(EXESUF)
 gcov-files-test-iov-y = util/iov.c
 check-unit-y += tests/test-aio$(EXESUF)
-check-unit-$(CONFIG_POSIX) += tests/test-rfifolock$(EXESUF)
 check-unit-y += tests/test-throttle$(EXESUF)
 gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
 gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
@@ -490,7 +489,6 @@ tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
 tests/test-char$(EXESUF): tests/test-char.o qemu-char.o qemu-timer.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y)
 tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
 tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
-tests/test-rfifolock$(EXESUF): tests/test-rfifolock.o $(test-util-obj-y)
 tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
 tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
diff --git a/tests/test-rfifolock.c b/tests/test-rfifolock.c
deleted file mode 100644
index 471a81114d..0000000000
--- a/tests/test-rfifolock.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * RFifoLock tests
- *
- * Copyright Red Hat, Inc. 2013
- *
- * Authors:
- *   Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2 or later.
- * See the COPYING.LIB file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qemu/rfifolock.h"
-
-static void test_nesting(void)
-{
-    RFifoLock lock;
-
-    /* Trivial test, ensure the lock is recursive */
-    rfifolock_init(&lock, NULL, NULL);
-    rfifolock_lock(&lock);
-    rfifolock_lock(&lock);
-    rfifolock_lock(&lock);
-    rfifolock_unlock(&lock);
-    rfifolock_unlock(&lock);
-    rfifolock_unlock(&lock);
-    rfifolock_destroy(&lock);
-}
-
-typedef struct {
-    RFifoLock lock;
-    int fd[2];
-} CallbackTestData;
-
-static void rfifolock_cb(void *opaque)
-{
-    CallbackTestData *data = opaque;
-    int ret;
-    char c = 0;
-
-    ret = write(data->fd[1], &c, sizeof(c));
-    g_assert(ret == 1);
-}
-
-static void *callback_thread(void *opaque)
-{
-    CallbackTestData *data = opaque;
-
-    /* The other thread holds the lock so the contention callback will be
-     * invoked...
-     */
-    rfifolock_lock(&data->lock);
-    rfifolock_unlock(&data->lock);
-    return NULL;
-}
-
-static void test_callback(void)
-{
-    CallbackTestData data;
-    QemuThread thread;
-    int ret;
-    char c;
-
-    rfifolock_init(&data.lock, rfifolock_cb, &data);
-    ret = qemu_pipe(data.fd);
-    g_assert(ret == 0);
-
-    /* Hold lock but allow the callback to kick us by writing to the pipe */
-    rfifolock_lock(&data.lock);
-    qemu_thread_create(&thread, "callback_thread",
-                       callback_thread, &data, QEMU_THREAD_JOINABLE);
-    ret = read(data.fd[0], &c, sizeof(c));
-    g_assert(ret == 1);
-    rfifolock_unlock(&data.lock);
-    /* If we got here then the callback was invoked, as expected */
-
-    qemu_thread_join(&thread);
-    close(data.fd[0]);
-    close(data.fd[1]);
-    rfifolock_destroy(&data.lock);
-}
-
-int main(int argc, char **argv)
-{
-    g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/nesting", test_nesting);
-    g_test_add_func("/callback", test_callback);
-    return g_test_run();
-}
diff --git a/util/Makefile.objs b/util/Makefile.objs
index 36c7dcc1fa..ad0f9c7fe4 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -25,7 +25,6 @@ util-obj-y += uuid.o
 util-obj-y += throttle.o
 util-obj-y += getauxval.o
 util-obj-y += readline.o
-util-obj-y += rfifolock.o
 util-obj-y += rcu.o
 util-obj-y += qemu-coroutine.o qemu-coroutine-lock.o qemu-coroutine-io.o
 util-obj-y += qemu-coroutine-sleep.o
diff --git a/util/rfifolock.c b/util/rfifolock.c
deleted file mode 100644
index 084c2f0ea1..0000000000
--- a/util/rfifolock.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Recursive FIFO lock
- *
- * Copyright Red Hat, Inc. 2013
- *
- * Authors:
- *   Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU LGPL, version 2 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "qemu/rfifolock.h"
-
-void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
-{
-    qemu_mutex_init(&r->lock);
-    r->head = 0;
-    r->tail = 0;
-    qemu_cond_init(&r->cond);
-    r->nesting = 0;
-    r->cb = cb;
-    r->cb_opaque = opaque;
-}
-
-void rfifolock_destroy(RFifoLock *r)
-{
-    qemu_cond_destroy(&r->cond);
-    qemu_mutex_destroy(&r->lock);
-}
-
-/*
- * Theory of operation:
- *
- * In order to ensure FIFO ordering, implement a ticketlock. Threads acquiring
- * the lock enqueue themselves by incrementing the tail index. When the lock
- * is unlocked, the head is incremented and waiting threads are notified.
- *
- * Recursive locking does not take a ticket since the head is only incremented
- * when the outermost recursive caller unlocks.
- */
-void rfifolock_lock(RFifoLock *r)
-{
-    qemu_mutex_lock(&r->lock);
-
-    /* Take a ticket */
-    unsigned int ticket = r->tail++;
-
-    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
-        r->tail--; /* put ticket back, we're nesting */
-    } else {
-        while (ticket != r->head) {
-            /* Invoke optional contention callback */
-            if (r->cb) {
-                r->cb(r->cb_opaque);
-            }
-            qemu_cond_wait(&r->cond, &r->lock);
-        }
-        qemu_thread_get_self(&r->owner_thread);
-    }
-
-    r->nesting++;
-    qemu_mutex_unlock(&r->lock);
-}
-
-void rfifolock_unlock(RFifoLock *r)
-{
-    qemu_mutex_lock(&r->lock);
-    assert(r->nesting > 0);
-    assert(qemu_thread_is_self(&r->owner_thread));
-    if (--r->nesting == 0) {
-        r->head++;
-        qemu_cond_broadcast(&r->cond);
-    }
-    qemu_mutex_unlock(&r->lock);
-}
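For reference, the "Theory of operation" comment in the deleted util/rfifolock.c describes a ticket lock: every acquirer takes an increasing tail number and waits until head catches up, which is what gave RFifoLock its FIFO fairness. A standalone sketch of just that mechanism — plain pthreads rather than QEMU primitives, with the nesting and contention-callback features left out — would look roughly like this:

```c
#include <pthread.h>

/* Minimal ticket lock: the FIFO-fairness mechanism RFifoLock implemented,
 * stripped of recursion and contention callbacks. */
typedef struct {
    pthread_mutex_t lock;   /* protects head/tail */
    pthread_cond_t cond;    /* signalled whenever head advances */
    unsigned int head;      /* ticket currently allowed to hold the lock */
    unsigned int tail;      /* next ticket to hand out */
} TicketLock;

static void ticket_lock_init(TicketLock *t)
{
    pthread_mutex_init(&t->lock, NULL);
    pthread_cond_init(&t->cond, NULL);
    t->head = t->tail = 0;
}

static void ticket_lock_acquire(TicketLock *t)
{
    pthread_mutex_lock(&t->lock);
    unsigned int ticket = t->tail++;      /* enqueue in FIFO order */
    while (ticket != t->head) {
        pthread_cond_wait(&t->cond, &t->lock);
    }
    pthread_mutex_unlock(&t->lock);       /* we now own the ticket lock */
}

static void ticket_lock_release(TicketLock *t)
{
    pthread_mutex_lock(&t->lock);
    t->head++;                            /* hand the lock to the next ticket */
    pthread_cond_broadcast(&t->cond);
    pthread_mutex_unlock(&t->lock);
}
```

A recursive mutex makes no such ordering promise; that is the trade-off the commit message accepts, since nothing in QEMU still relies on the fairness or on the contention callback.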