author | Paolo Bonzini <pbonzini@redhat.com> | 2017-06-05 14:38:59 +0200
committer | Fam Zheng <famz@redhat.com> | 2017-06-16 07:55:00 +0800
commit | ae2d489c341ee98f2daad819a3812c6199481848 (patch)
tree | 8c0985f901284e897a27ed04467ebecc8755086a /util
parent | 93001e9d87ae40e7569eb0ece0eb86d9c9582e41 (diff)
util: add stats64 module
This module provides fast paths for 64-bit atomic operations on machines
that only have 32-bit atomic access.
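For context, the fast-path wrappers live in the accompanying include/qemu/stats64.h header (not shown here, since the diffstat below is limited to 'util'). A minimal usage sketch, assuming the stat64_add()/stat64_get() names from that header; the counter and functions below are hypothetical:

#include "qemu/osdep.h"
#include "qemu/stats64.h"

/* Hypothetical statistic; a zero-initialized Stat64 starts at 0. */
static Stat64 bytes_written;

static void account_write(uint64_t len)
{
    /* On CONFIG_ATOMIC64 hosts this is a single 64-bit atomic add;
     * otherwise it falls back to the stat64_add32_carry() slow path
     * defined in this patch. */
    stat64_add(&bytes_written, len);
}

static uint64_t query_bytes_written(void)
{
    return stat64_get(&bytes_written);
}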
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20170605123908.18777-11-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Diffstat (limited to 'util')
-rw-r--r-- | util/Makefile.objs | 1
-rw-r--r-- | util/stats64.c | 137
2 files changed, 138 insertions, 0 deletions
diff --git a/util/Makefile.objs b/util/Makefile.objs
index c6205ebf86..8a333d3dd7 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -42,4 +42,5 @@ util-obj-y += log.o
 util-obj-y += qdist.o
 util-obj-y += qht.o
 util-obj-y += range.o
+util-obj-y += stats64.o
 util-obj-y += systemd.o
diff --git a/util/stats64.c b/util/stats64.c
new file mode 100644
index 0000000000..9968fcceac
--- /dev/null
+++ b/util/stats64.c
@@ -0,0 +1,137 @@
+/*
+ * Atomic operations on 64-bit quantities.
+ *
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/atomic.h"
+#include "qemu/stats64.h"
+#include "qemu/processor.h"
+
+#ifndef CONFIG_ATOMIC64
+static inline void stat64_rdlock(Stat64 *s)
+{
+    /* Keep out incoming writers to avoid them starving us. */
+    atomic_add(&s->lock, 2);
+
+    /* If there is a concurrent writer, wait for it. */
+    while (atomic_read(&s->lock) & 1) {
+        cpu_relax();
+    }
+}
+
+static inline void stat64_rdunlock(Stat64 *s)
+{
+    atomic_sub(&s->lock, 2);
+}
+
+static inline bool stat64_wrtrylock(Stat64 *s)
+{
+    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
+}
+
+static inline void stat64_wrunlock(Stat64 *s)
+{
+    atomic_dec(&s->lock);
+}
+
+uint64_t stat64_get(const Stat64 *s)
+{
+    uint32_t high, low;
+
+    stat64_rdlock((Stat64 *)s);
+
+    /* 64-bit writes always take the lock, so we can read in
+     * any order.
+     */
+    high = atomic_read(&s->high);
+    low = atomic_read(&s->low);
+    stat64_rdunlock((Stat64 *)s);
+
+    return ((uint64_t)high << 32) | low;
+}
+
+bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
+{
+    uint32_t old;
+
+    if (!stat64_wrtrylock(s)) {
+        cpu_relax();
+        return false;
+    }
+
+    /* 64-bit reads always take the lock, so they don't care about the
+     * order of our update.  By updating s->low first, we can check
+     * whether we have to carry into s->high.
+     */
+    old = atomic_fetch_add(&s->low, low);
+    high += (old + low) < old;
+    atomic_add(&s->high, high);
+    stat64_wrunlock(s);
+    return true;
+}
+
+bool stat64_min_slow(Stat64 *s, uint64_t value)
+{
+    uint32_t high, low;
+    uint64_t orig;
+
+    if (!stat64_wrtrylock(s)) {
+        cpu_relax();
+        return false;
+    }
+
+    high = atomic_read(&s->high);
+    low = atomic_read(&s->low);
+
+    orig = ((uint64_t)high << 32) | low;
+    if (orig > value) {
+        /* We have to set low before high, just like stat64_min reads
+         * high before low.  The value may become higher temporarily, but
+         * stat64_get does not notice (it takes the lock) and the only ill
+         * effect on stat64_min is that the slow path may be triggered
+         * unnecessarily.
+         */
+        atomic_set(&s->low, (uint32_t)value);
+        smp_wmb();
+        atomic_set(&s->high, value >> 32);
+    }
+    stat64_wrunlock(s);
+    return true;
+}
+
+bool stat64_max_slow(Stat64 *s, uint64_t value)
+{
+    uint32_t high, low;
+    uint64_t orig;
+
+    if (!stat64_wrtrylock(s)) {
+        cpu_relax();
+        return false;
+    }
+
+    high = atomic_read(&s->high);
+    low = atomic_read(&s->low);
+
+    orig = ((uint64_t)high << 32) | low;
+    if (orig < value) {
+        /* We have to set low before high, just like stat64_max reads
+         * high before low.  The value may become lower temporarily, but
+         * stat64_get does not notice (it takes the lock) and the only ill
+         * effect on stat64_max is that the slow path may be triggered
+         * unnecessarily.
+         */
+        atomic_set(&s->low, (uint32_t)value);
+        smp_wmb();
+        atomic_set(&s->high, value >> 32);
+    }
+    stat64_wrunlock(s);
+    return true;
+}
+#endif
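A note on the locking scheme above: bit 0 of s->lock marks an active writer and the remaining bits count readers, which is why readers add and subtract 2 while a writer does a 0 -> 1 compare-and-swap that can only succeed when nobody else holds the lock. The following is a standalone model of the same protocol in portable C11 atomics; an illustrative sketch, not QEMU code (QEMU's atomic_* macros differ in detail):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Bit 0: writer active; bits 1 and up: count of active readers. */
static _Atomic uint32_t lock_word;

static void model_rdlock(void)
{
    /* Announce the reader first: once the count is nonzero, no new
     * writer can take the lock, so writers cannot starve readers. */
    atomic_fetch_add(&lock_word, 2);
    /* Wait out a writer that got in before we announced ourselves. */
    while (atomic_load(&lock_word) & 1) {
        /* spin */
    }
}

static void model_rdunlock(void)
{
    atomic_fetch_sub(&lock_word, 2);
}

static bool model_wrtrylock(void)
{
    uint32_t expected = 0;
    /* Succeeds only when there are no readers and no writer, mirroring
     * stat64_wrtrylock(); on failure the caller backs off and retries. */
    return atomic_compare_exchange_strong(&lock_word, &expected, 1);
}

static void model_wrunlock(void)
{
    atomic_fetch_sub(&lock_word, 1);
}

int main(void)
{
    model_rdlock();
    model_rdunlock();
    if (model_wrtrylock()) {
        model_wrunlock();
    }
    return 0;
}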
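Similarly, the carry computation in stat64_add32_carry() is worth unpacking: unsigned 32-bit addition wraps modulo 2^32, so (old + low) < old is true exactly when the addition overflowed and a carry must propagate into the high word. A small worked example with hypothetical values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t old = 0xFFFFFFF0u;  /* previous contents of s->low */
    uint32_t low = 0x20u;        /* increment being added */

    /* 0xFFFFFFF0 + 0x20 wraps to 0x00000010, which is smaller than
     * old, so the comparison yields 1 and the high word is bumped. */
    uint32_t carry = (old + low) < old;

    printf("new low = 0x%08" PRIX32 ", carry into high = %" PRIu32 "\n",
           old + low, carry);
    return 0;
}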