path: root/include/my_atomic_wrapper.h

/* Copyright (c) 2020, 2021, MariaDB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335  USA */

#pragma once
#ifdef __cplusplus
#include <atomic>
/**
  A wrapper for std::atomic, defaulting to std::memory_order_relaxed.

  When loading or storing at std::memory_order_relaxed on IA-32 or
  AMD64, this wrapper only introduces constraints on the C++ compiler,
  preventing it from reordering or eliminating the loads and stores;
  no special instructions are emitted.

  On POWER and ARM, atomic loads and stores involve different instructions
  from normal loads and stores and will thus incur some overhead.

  Because atomic read-modify-write operations will always incur
  overhead, we intentionally do not define
  operator++(), operator--(), operator+=(), operator-=(), or similar,
  to make the overhead stand out in the users of this code.
  A usage sketch follows the class definition below.
*/
template <typename Type> class Atomic_relaxed
{
  std::atomic<Type> m;
public:
  Atomic_relaxed(const Atomic_relaxed<Type> &rhs)
  { m.store(rhs, std::memory_order_relaxed); }
  Atomic_relaxed(Type val) : m(val) {}
  Atomic_relaxed()= default;

  Type load(std::memory_order o= std::memory_order_relaxed) const
  { return m.load(o); }
  void store(Type i, std::memory_order o= std::memory_order_relaxed)
  { m.store(i, o); }
  operator Type() const { return load(); }
  Type operator=(const Type i) { store(i); return i; }
  Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
  Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_add(i, o); }
  Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_sub(i, o); }
  Type fetch_xor(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_xor(i, o); }
  Type fetch_and(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_and(i, o); }
  Type fetch_or(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_or(i, o); }
  bool compare_exchange_strong(Type& i1, const Type i2,
                               std::memory_order o1= std::memory_order_relaxed,
                               std::memory_order o2= std::memory_order_relaxed)
  { return m.compare_exchange_strong(i1, i2, o1, o2); }
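  /*
    Typical use of compare_exchange_strong() is a retry loop: on
    failure, the expected value i1 is refreshed with the value that
    was actually found, so the caller can recompute and try again.
    A minimal sketch (the names "flags" and "old" are hypothetical,
    not part of MariaDB):

      Atomic_relaxed<unsigned> flags{0};
      unsigned old= flags;
      while (!flags.compare_exchange_strong(old, old | 1))
      {
        // old was refreshed with the current value; recompute and retry
      }
  */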
  Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.exchange(i, o); }
};
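
/*
  Usage sketch for Atomic_relaxed (illustrative only; the function and
  variable names below are hypothetical, not part of MariaDB):

    #include <cstdint>

    Atomic_relaxed<uint64_t> pending_writes{0};

    void on_write_queued()
    {
      pending_writes.fetch_add(1); // explicit RMW: the cost stands out
    }

    uint64_t snapshot()
    {
      return pending_writes;      // relaxed load via operator Type()
    }

    void reset(uint64_t n)
    {
      pending_writes= n;          // relaxed store via operator=()
    }

    uint64_t drain()
    {
      // stronger ordering can be requested explicitly where needed
      return pending_writes.exchange(0, std::memory_order_acq_rel);
    }
*/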
#endif /* __cplusplus */