author     Paolo Bonzini <pbonzini@redhat.com>  2015-02-11 15:51:54 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2015-02-11 21:48:37 +0100
commit     a7d1d636797ec1b30ca4dae02f9e1eb2d6b2c439 (patch)
tree       8195e09263229a8874610024181e8c09238cd0fd /util
parent     444c7e0d92b5eb35fb85dc654f4bd991b0d3a0f2 (diff)
rcu: do not let RCU callbacks pile up indefinitely
Always process them within a short time.  Even though waiting a little
is useful, it is not okay to delay e.g. qemu_opts_del forever.

Reviewed-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Tested-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
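As an illustration of the policy this patch adopts, below is a minimal
standalone sketch, not QEMU code: C11 atomics and usleep() stand in for
QEMU's atomic_read() and qemu_event_*() primitives, the blocking event
wait for the n == 0 case is reduced to plain polling, and the names
pending_count, DEMO_MIN_SIZE and wait_for_batch are made up for the demo.
It shows the property the patch establishes: once at least one callback
is pending, the thread polls at most five times at 10 ms each, so a small
batch is processed within roughly 50 ms instead of blocking indefinitely.

    /* Hypothetical demo: pending_count and DEMO_MIN_SIZE are stand-ins
     * for rcu_call_count and RCU_CALL_MIN_SIZE in util/rcu.c. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DEMO_MIN_SIZE 30

    static atomic_int pending_count;

    /* Wait for a batch of callbacks, but never for more than ~5 * 10 ms
     * once at least one callback is pending.  Returns the batch size. */
    static int wait_for_batch(void)
    {
        int tries = 0;
        int n = atomic_load(&pending_count);

        while (n == 0 || (n < DEMO_MIN_SIZE && ++tries <= 5)) {
            usleep(10000);      /* the event wait is reduced to polling */
            n = atomic_load(&pending_count);
        }
        return n;
    }

    int main(void)
    {
        /* Three callbacks trickle in and nothing else arrives: the old
         * policy could block in the event wait forever; this one returns
         * a batch of 3 after roughly 50 ms. */
        atomic_store(&pending_count, 3);
        int n = wait_for_batch();
        atomic_fetch_sub(&pending_count, n);   /* consume the batch */
        printf("processed a batch of %d callback(s)\n", n);
        return 0;
    }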
Diffstat (limited to 'util')
 util/rcu.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/util/rcu.c b/util/rcu.c
index c9c3e6e4ab..486d7b6cc2 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -223,14 +223,16 @@ static void *call_rcu_thread(void *opaque)
          * Fetch rcu_call_count now, we only must process elements that were
          * added before synchronize_rcu() starts.
          */
-        while (n < RCU_CALL_MIN_SIZE && ++tries <= 5) {
-            g_usleep(100000);
-            qemu_event_reset(&rcu_call_ready_event);
-            n = atomic_read(&rcu_call_count);
-            if (n < RCU_CALL_MIN_SIZE) {
-                qemu_event_wait(&rcu_call_ready_event);
+        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
+            g_usleep(10000);
+            if (n == 0) {
+                qemu_event_reset(&rcu_call_ready_event);
                 n = atomic_read(&rcu_call_count);
+                if (n == 0) {
+                    qemu_event_wait(&rcu_call_ready_event);
+                }
             }
+            n = atomic_read(&rcu_call_count);
         }
         atomic_sub(&rcu_call_count, n);
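Note on the design, as visible in the hunk above: the reworked loop blocks
on rcu_call_ready_event only when there are no callbacks at all (n == 0),
and the reset/re-read/wait sequence re-checks rcu_call_count between the
event reset and the wait so a wakeup posted in that window is not lost.
Once at least one callback is pending, the thread merely polls, and the
per-iteration sleep drops from 100 ms to 10 ms, so a batch smaller than
RCU_CALL_MIN_SIZE is now processed after at most about five 10 ms sleeps
instead of stalling in qemu_event_wait() until the next call_rcu1()
happens to arrive.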