Modernize atomics

- Based on C++11's `atomic`
- Reworked `SafeRefCount` (based on the rewrite by @hpvb)
- Replaced the free atomic functions with the new `SafeNumeric<T>` (usage sketched below)
- Replaced incorrect uses of `volatile` with the new `SafeFlag`
- Platform-specific implementations no longer needed

Co-authored-by: Hein-Pieter van Braam-Stewart <hp@tmm.cx>
Pedro J. Estébanez
2021-01-31 13:34:42 +01:00
parent 6d89f675b1
commit 4485b43a57
54 changed files with 641 additions and 676 deletions
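In practice, the migration looks like this minimal sketch (variable names and the include path are illustrative, not from the commit): a raw, sometimes `volatile`, integer plus free functions becomes a wrapper type whose every access is explicitly atomic.

```cpp
#include "safe_refcount.h" // the header changed below; exact path assumed

SafeFlag exiting; // was a (non-atomic!) volatile bool before this commit

void request_exit() {
	exiting.set(); // release store
}

bool should_exit() {
	return exiting.is_set(); // acquire load
}

void counter_example() {
	// Before: volatile uint32_t n; atomic_increment(&n); atomic_decrement(&n);
	SafeNumeric<uint32_t> n;           // starts at 0
	n.increment();                     // returns the new value: 1
	uint32_t prev = n.postdecrement(); // returns the previous value: 1
	(void)prev;                        // n is back to 0
}
```

Note the design goal stated in the header itself: no implicit conversions or arithmetic operators, so every atomic access stays visible at the call site.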

@@ -31,181 +31,292 @@
 #ifndef SAFE_REFCOUNT_H
 #define SAFE_REFCOUNT_H
 
-#include "core/os/mutex.h"
 #include "core/typedefs.h"
-#include "platform_config.h"
-
-// Atomic functions, these are used for multithread safe reference counters!
 
-#ifdef NO_THREADS
+#if !defined(NO_THREADS)
+
+#include <atomic>
+
+// Design goals for these classes:
+// - No automatic conversions or arithmetic operators,
+//   to keep explicit the use of atomics everywhere.
+// - Using acquire-release semantics, even to set the first value.
+//   The first value may be set relaxedly in many cases, but adding the distinction
+//   between relaxed and unrelaxed operation to the interface would make it needlessly
+//   flexible. There's negligible waste in having release semantics for the initial
+//   value and, as an important benefit, you can be sure the value is properly synchronized
+//   even with threads that are already running.
 
-/* Bogus implementation unaware of multiprocessing */
-
-template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
-	if (*pw == 0)
-		return 0;
-	(*pw)++;
-	return *pw;
-}
-
-template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
-	(*pw)--;
-	return *pw;
-}
-
-template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
-	(*pw)++;
-	return *pw;
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
-	(*pw) -= val;
-	return *pw;
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
-	(*pw) += val;
-	return *pw;
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
-	if (val > *pw)
-		*pw = val;
-	return *pw;
-}
-
-#elif defined(__GNUC__)
-
-/* Implementation for GCC & Clang */
-
-// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
-// Clang states it supports GCC atomic builtins.
-
-template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
-	while (true) {
-		T tmp = static_cast<T const volatile &>(*pw);
-		if (tmp == 0)
-			return 0; // if zero, can't add to it anymore
-		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
-			return tmp + 1;
-	}
-}
-
-template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
-	return __sync_sub_and_fetch(pw, 1);
-}
-
-template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
-	return __sync_add_and_fetch(pw, 1);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
-	return __sync_sub_and_fetch(pw, val);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
-	return __sync_add_and_fetch(pw, val);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
-	while (true) {
-		T tmp = static_cast<T const volatile &>(*pw);
-		if (tmp >= val)
-			return tmp; // already greater, or equal
-		if (__sync_val_compare_and_swap(pw, tmp, val) == tmp)
-			return val;
-	}
-}
-
-#elif defined(_MSC_VER)
-
-// For MSVC use a separate compilation unit to prevent windows.h from polluting
-// the global namespace.
-uint32_t atomic_conditional_increment(volatile uint32_t *pw);
-uint32_t atomic_decrement(volatile uint32_t *pw);
-uint32_t atomic_increment(volatile uint32_t *pw);
-uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val);
-uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val);
-uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val);
-
-uint64_t atomic_conditional_increment(volatile uint64_t *pw);
-uint64_t atomic_decrement(volatile uint64_t *pw);
-uint64_t atomic_increment(volatile uint64_t *pw);
-uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val);
-uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val);
-uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val);
-
-#else
-
-//no threads supported?
-#error Must provide atomic functions for this platform or compiler!
-
-#endif
-
-struct SafeRefCount {
-	uint32_t count;
+template <class T>
+class SafeNumeric {
+	std::atomic<T> value;
 
 public:
-	// destroy() is called when weak_count_ drops to zero.
+	_ALWAYS_INLINE_ void set(T p_value) {
+		value.store(p_value, std::memory_order_release);
+	}
+
+	_ALWAYS_INLINE_ T get() const {
+		return value.load(std::memory_order_acquire);
+	}
+
+	_ALWAYS_INLINE_ T increment() {
+		return value.fetch_add(1, std::memory_order_acq_rel) + 1;
+	}
+
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postincrement() {
+		return value.fetch_add(1, std::memory_order_acq_rel);
+	}
+
+	_ALWAYS_INLINE_ T decrement() {
+		return value.fetch_sub(1, std::memory_order_acq_rel) - 1;
+	}
+
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postdecrement() {
+		return value.fetch_sub(1, std::memory_order_acq_rel);
+	}
+
+	_ALWAYS_INLINE_ T add(T p_value) {
+		return value.fetch_add(p_value, std::memory_order_acq_rel) + p_value;
+	}
+
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postadd(T p_value) {
+		return value.fetch_add(p_value, std::memory_order_acq_rel);
+	}
+
+	_ALWAYS_INLINE_ T sub(T p_value) {
+		return value.fetch_sub(p_value, std::memory_order_acq_rel) - p_value;
+	}
+
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postsub(T p_value) {
+		return value.fetch_sub(p_value, std::memory_order_acq_rel);
+	}
+
+	_ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
+		while (true) {
+			T tmp = value.load(std::memory_order_acquire);
+			if (tmp >= p_value) {
+				return tmp; // already greater, or equal
+			}
+			if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) {
+				return p_value;
+			}
+		}
+	}
+
+	_ALWAYS_INLINE_ T conditional_increment() {
+		while (true) {
+			T c = value.load(std::memory_order_acquire);
+			if (c == 0) {
+				return 0;
+			}
+			if (value.compare_exchange_weak(c, c + 1, std::memory_order_release)) {
+				return c + 1;
+			}
+		}
+	}
+
+	_ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) {
+		set(p_value);
+	}
+};
+class SafeFlag {
+	std::atomic_bool flag;
+
+public:
+	_ALWAYS_INLINE_ bool is_set() const {
+		return flag.load(std::memory_order_acquire);
+	}
+
+	_ALWAYS_INLINE_ void set() {
+		flag.store(true, std::memory_order_release);
+	}
+
+	_ALWAYS_INLINE_ void clear() {
+		flag.store(false, std::memory_order_release);
+	}
+
+	_ALWAYS_INLINE_ void set_to(bool p_value) {
+		flag.store(p_value, std::memory_order_release);
+	}
+
+	_ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) {
+		set_to(p_value);
+	}
+};
+
+class SafeRefCount {
+	SafeNumeric<uint32_t> count;
+
+public:
 	_ALWAYS_INLINE_ bool ref() { // true on success
-		return atomic_conditional_increment(&count) != 0;
+		return count.conditional_increment() != 0;
 	}
 
 	_ALWAYS_INLINE_ uint32_t refval() { // none-zero on success
-		return atomic_conditional_increment(&count);
+		return count.conditional_increment();
 	}
 
 	_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
-		return atomic_decrement(&count) == 0;
+		return count.decrement() == 0;
 	}
 
 	_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
-		return atomic_decrement(&count);
+		return count.decrement();
 	}
 
-	_ALWAYS_INLINE_ uint32_t get() const { // nothrow
-		return count;
+	_ALWAYS_INLINE_ uint32_t get() const {
+		return count.get();
 	}
 
 	_ALWAYS_INLINE_ void init(uint32_t p_value = 1) {
-		count = p_value;
+		count.set(p_value);
 	}
 };
+#else
+
+template <class T>
+class SafeNumeric {
+protected:
+	T value;
+
+public:
+	_ALWAYS_INLINE_ void set(T p_value) {
+		value = p_value;
+	}
+
+	_ALWAYS_INLINE_ T get() const {
+		return value;
+	}
+
+	_ALWAYS_INLINE_ T increment() {
+		return ++value;
+	}
+
+	_ALWAYS_INLINE_ T postincrement() {
+		return value++;
+	}
+
+	_ALWAYS_INLINE_ T decrement() {
+		return --value;
+	}
+
+	_ALWAYS_INLINE_ T postdecrement() {
+		return value--;
+	}
+
+	_ALWAYS_INLINE_ T add(T p_value) {
+		return value += p_value;
+	}
+
+	_ALWAYS_INLINE_ T postadd(T p_value) {
+		T old = value;
+		value += p_value;
+		return old;
+	}
+
+	_ALWAYS_INLINE_ T sub(T p_value) {
+		return value -= p_value;
+	}
+
+	_ALWAYS_INLINE_ T postsub(T p_value) {
+		T old = value;
+		value -= p_value;
+		return old;
+	}
+
+	_ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
+		if (value < p_value) {
+			value = p_value;
+		}
+		return value;
+	}
+
+	_ALWAYS_INLINE_ T conditional_increment() {
+		if (value == 0) {
+			return 0;
+		} else {
+			return ++value;
+		}
+	}
+
+	_ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) :
+			value(p_value) {
+	}
+};
+
+class SafeFlag {
+protected:
+	bool flag;
+
+public:
+	_ALWAYS_INLINE_ bool is_set() const {
+		return flag;
+	}
+
+	_ALWAYS_INLINE_ void set() {
+		flag = true;
+	}
+
+	_ALWAYS_INLINE_ void clear() {
+		flag = false;
+	}
+
+	_ALWAYS_INLINE_ void set_to(bool p_value) {
+		flag = p_value;
+	}
+
+	_ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) :
+			flag(p_value) {}
+};
+
+class SafeRefCount {
+	uint32_t count;
+
+public:
+	_ALWAYS_INLINE_ bool ref() { // true on success
+		if (count != 0) {
+			++count;
+			return true;
+		} else {
+			return false;
+		}
+	}
+
+	_ALWAYS_INLINE_ uint32_t refval() { // none-zero on success
+		if (count != 0) {
+			return ++count;
+		} else {
+			return 0;
+		}
+	}
+
+	_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+		return --count == 0;
+	}
+
+	_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+		return --count;
+	}
+
+	_ALWAYS_INLINE_ uint32_t get() const {
+		return count;
+	}
+
+	_ALWAYS_INLINE_ void init(uint32_t p_value = 1) {
+		count = p_value;
+	}
+
+	SafeRefCount() :
+			count(0) {}
+};
+
+#endif
 
 #endif // SAFE_REFCOUNT_H
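
The design-goals comment above stresses that even the initial `set()` uses release semantics, so a freshly initialized value is safely visible to threads that are already running. Combined with `SafeRefCount`'s contract (`ref()` fails once the count has reached zero; `unref()` returns true only for the last owner), the intended usage looks roughly like this sketch (`Payload` and `worker` are hypothetical names, not from the commit):

```cpp
#include <thread>

#include "safe_refcount.h" // the header above; exact path assumed

struct Payload {
	SafeRefCount refcount;
	SafeNumeric<uint64_t> frames; // shared counter, no volatile needed
	Payload() { refcount.init(); } // count = 1, published with release semantics
};

void worker(Payload *p) {
	if (!p->refcount.ref()) { // conditional increment; fails if count already hit 0
		return;
	}
	p->frames.increment(); // acq_rel read-modify-write
	if (p->refcount.unref()) { // true only when we decremented to 0...
		delete p; // ...so exactly one thread disposes of the object
	}
}

int main() {
	Payload *p = new Payload;
	std::thread t(worker, p);
	t.join();
	if (p->refcount.unref()) { // drop the initial reference from init()
		delete p;
	}
	return 0;
}
```

The `#else` branch at the end of the diff provides the same API without atomics for NO_THREADS builds, so call sites compile unchanged on single-threaded platforms.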