/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    atomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it.  */
    while (atomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    atomic_sub(&s->lock, 2);
}

static inline bool stat64_wrtrylock(Stat64 *s)
{
    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    atomic_dec(&s->lock);
}
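
/* A sketch of the lock word encoding implied by the primitives above
 * (an explanatory note, not part of the original file): bit 0 is the
 * writer flag, claimed by stat64_wrtrylock's cmpxchg from 0 to 1, while
 * each reader adds 2.  Any number of readers can therefore hold the lock
 * at once, and a writer's cmpxchg keeps failing until the count drops
 * back to 0; conversely, a reader that arrives while bit 0 is set spins
 * in stat64_rdlock until the writer is done.
 */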

uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = atomic_read(&s->high);
    low = atomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}
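
/* For comparison (an explanatory note, not from the original file): when
 * CONFIG_ATOMIC64 is set, none of this file is compiled, and the
 * qemu/stats64.h header is expected to implement stat64_get as a single
 * atomic 64-bit read; the split high/low words plus the lock are only
 * needed on hosts without native 64-bit atomics, which is why everything
 * here lives under #ifndef CONFIG_ATOMIC64.
 */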

bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = atomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    atomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
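
/* Worked example of the carry check above (explanatory note, not part of
 * the original file): with s->low == 0xFFFFFFFF and low == 1, the
 * fetch-add wraps s->low to 0 and returns old == 0xFFFFFFFF; old + low
 * (== 0) is less than old, so one carry is added into s->high.
 *
 * Below is a minimal sketch of how a fast-path caller might retry on
 * contention.  The real fast path lives in qemu/stats64.h; this example
 * function is an illustrative assumption, not the actual QEMU code.
 */
#if 0
static void example_stat64_add(Stat64 *s, uint64_t value)
{
    uint32_t low = (uint32_t)value;
    uint32_t high = value >> 32;

    /* stat64_add32_carry returns false if a writer already holds the
     * lock; it calls cpu_relax() before failing, so just retry.
     */
    while (!stat64_add32_carry(s, low, high)) {
        /* retry */
    }
}
#endif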

bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = atomic_read(&s->high);
    low = atomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        atomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        atomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
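
/* A minimal sketch of the lock-free fast path that pairs with the
 * smp_wmb() above: it reads high before low, mirroring the low-then-high
 * store order in stat64_min_slow.  The real fast path lives in
 * qemu/stats64.h; this is an illustrative assumption, not QEMU's actual
 * implementation.
 */
#if 0
static void example_stat64_min(Stat64 *s, uint64_t value)
{
    uint32_t high = value >> 32;
    uint32_t low = (uint32_t)value;

    do {
        uint32_t orig_high = atomic_read(&s->high);
        if (orig_high < high) {
            return;  /* the stored value is already smaller */
        }
        if (orig_high == high) {
            /* Read low after high, pairing with the write barrier in
             * stat64_min_slow, so a torn update cannot make the stored
             * pair look smaller than it really is.
             */
            smp_rmb();
            if (atomic_read(&s->low) <= low) {
                return;
            }
        }
        /* value may be a new minimum: take the locked slow path,
         * retrying if a concurrent writer holds the lock.
         */
    } while (!stat64_min_slow(s, value));
}
#endif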

bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = atomic_read(&s->high);
    low = atomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        atomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        atomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif
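
/* A short usage sketch (illustrative, not part of the original file),
 * assuming the Stat64 type and the fast-path wrappers stat64_init,
 * stat64_add, stat64_min and stat64_get declared in qemu/stats64.h:
 */
#if 0
static Stat64 bytes_transferred;
static Stat64 min_latency_ns;

static void stats_init(void)
{
    stat64_init(&bytes_transferred, 0);
    stat64_init(&min_latency_ns, UINT64_MAX);
}

static void account_request(uint64_t bytes, uint64_t latency_ns)
{
    stat64_add(&bytes_transferred, bytes);  /* lock-free on the fast path */
    stat64_min(&min_latency_ns, latency_ns);
}

static uint64_t report_total(void)
{
    /* Consistent 64-bit snapshot even without native 64-bit atomics.  */
    return stat64_get(&bytes_transferred);
}
#endif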