xenium
seqlock.hpp
1 //
2 // Copyright (c) 2018-2020 Manuel Pöter.
3 // Licensed under the MIT License. See LICENSE file in the project root for full license information.
4 //
5 
6 #ifndef XENIUM_SEQLOCK
7 #define XENIUM_SEQLOCK
8 
9 #include <xenium/parameter.hpp>
10 #include <xenium/detail/port.hpp>
11 
12 #include <atomic>
13 #include <cassert>
14 #include <cstdint>
15 #include <memory>
16 
17 namespace xenium {
18 
namespace policy {
 /// Policy to configure the number of data slots used in `seqlock`.
 /// @tparam Value the number of slots; seqlock requires Value >= 1.
 template <unsigned Value>
 struct slots;
}
27 
/// An implementation of the sequence lock (also often referred to as "sequential lock").
///
/// Holds a single instance of T and provides atomic load/store/update operations.
/// Readers never block: they optimistically copy the data word-wise and retry when
/// the sequence counter indicates a concurrent update. Writers serialize among
/// themselves via the lock bit (the lowest bit of the sequence counter).
///
/// @tparam T the stored value type; must be default constructible, trivially
///   destructible, trivially copyable, and larger than a pointer (see asserts below).
/// @tparam Policies optional policies, e.g. `policy::slots<N>`.
template<class T, class... Policies>
struct seqlock {
  using value_type = T;

  static_assert(std::is_default_constructible_v<T>, "T must be default constructible");
  static_assert(std::is_trivially_destructible_v<T>, "T must be trivially destructible");
  static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
  static_assert(sizeof(T) > sizeof(std::uintptr_t),
  "For types T with a size less or equal to a pointer use an atomic<T> with a compare_exchange update loop.");

  // NOTE(review): read_data/store_data copy sizeof(T) / sizeof(uintptr_t) whole
  // words, so trailing bytes of a T whose size is not a multiple of
  // sizeof(uintptr_t) would not be transferred - confirm whether a static_assert
  // enforcing that multiple is intended here.

  /// Number of data slots (configurable via `policy::slots`). With more than one
  /// slot the writer publishes into the next slot, so readers of the current slot
  /// are less likely to observe a torn value and retry.
  static constexpr unsigned slots = parameter::value_param_t<unsigned, policy::slots, 1, Policies...>::value;
  static_assert(slots >= 1, "slots must be >= 1");

  /// Constructs an empty seqlock; note that no T instance is constructed in the
  /// slot storage until the first store/update (or a value-taking constructor).
  seqlock() = default;

  /// Constructs an object of type T via copy construction into the first slot.
  explicit seqlock(const T& data) {
    new(&_data) T(data);
  }

  /// Constructs an object of type T using args as the parameter list for the
  /// constructor of T; the instance is placement-constructed in the first slot.
  template <class... Args>
  explicit seqlock(Args&&... args) {
    new(&_data) T(std::forward<Args>(args)...);
  }

  // Non-copyable and non-movable: the atomic sequence counter and the in-place
  // slot storage cannot be transferred safely.
  seqlock(const seqlock&) = delete;
  seqlock(seqlock&&) = delete;

  seqlock& operator=(const seqlock&) = delete;
  seqlock& operator=(seqlock&&) = delete;

  /// Reads the current value. Never blocks writers; retries until a consistent
  /// (non-torn) copy was obtained.
  /// @return a consistent copy of the stored value.
  T load() const;

  /// Stores the given value. Writers are serialized via the sequence lock bit.
  /// @param value the new value to publish.
  void store(const T& value);

  /// Updates the stored value with the given functor.
  /// @param func callable receiving a `T&` copy of the current value; its
  ///   mutations become the new stored value.
  template <class Func>
  void update(Func func);

private:
  // Raw, suitably aligned storage for one T per slot; T is only ever
  // placement-constructed / word-copied into it.
  using storage_t = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
  using sequence_t = uintptr_t;
  // Word type used for the per-word atomic copy in read_data/store_data.
  using copy_t = uintptr_t;

  // The lowest bit of the sequence acts as the writer-lock flag (odd = locked).
  bool is_write_pending(sequence_t seq) const { return (seq & 1) != 0; }

  sequence_t acquire_lock();
  void release_lock(sequence_t seq);

  void read_data(T& dest, const storage_t& src) const;
  void store_data(const T& src, storage_t& dest);

  // Sequence counter: odd while an update is in progress; (seq >> 1) % slots
  // selects the slot holding the most recently published value.
  std::atomic<sequence_t> _seq{0};
  storage_t _data[slots];
};
150 
151 template <class T, class... Policies>
153  T result;
154  // (1) - this acquire-load synchronizes-with the release-store (5)
155  sequence_t seq = _seq.load(std::memory_order_acquire);
156  for (;;) {
157  unsigned idx;
158  if (slots == 1) {
159  // wait while update is in progress...
160  // this is only necessary if we have a single slot, since otherwise we let
161  // reader and writer operate on separate slots.
162  while (is_write_pending(seq)) {
163  // (2) - this acquire-load synchronizes-with the release-store (5)
164  seq = _seq.load(std::memory_order_acquire);
165  }
166  idx = 0;
167  } else {
168  idx = (seq >> 1) % slots;
169  }
170 
171  read_data(result, _data[idx]);
172 
173  // (3) - this acquire-load synchronizes-with the release-store (5)
174  auto seq2 = _seq.load(std::memory_order_acquire);
175  if (seq2 - seq < (2 * slots - 1))
176  break;
177  seq = seq2;
178  }
179  return result;
180 }
181 
182 template <class T, class... Policies>
183 template <class Func>
185  auto seq = acquire_lock();
186  T data;
187  auto idx = (seq >> 1) % slots;
188  read_data(data, _data[idx]);
189  func(data);
190  store_data(data, _data[(idx + 1) % slots]);
191  release_lock(seq);
192 }
193 
194 template <class T, class... Policies>
195 void seqlock<T, Policies...>::store(const T& value) {
196  auto seq = acquire_lock();
197  auto idx = ((seq >> 1) + 1) % slots;
198  store_data(value, _data[idx]);
199  release_lock(seq);
200 }
201 
202 template <class T, class... Policies>
203 auto seqlock<T, Policies...>::acquire_lock() -> sequence_t {
204  auto seq = _seq.load(std::memory_order_relaxed);
205  for (;;) {
206  while (is_write_pending(seq))
207  seq = _seq.load(std::memory_order_relaxed);
208 
209  assert(is_write_pending(seq) == false);
210  // (4) - this acquire-CAS synchronizes-with the release-store (5)
211  if (_seq.compare_exchange_weak(seq, seq + 1, std::memory_order_acquire, std::memory_order_relaxed))
212  return seq + 1;
213  }
214 }
215 
216 template <class T, class... Policies>
217 void seqlock<T, Policies...>::release_lock(sequence_t seq) {
218  assert(seq == _seq.load(std::memory_order_relaxed));
219  assert(is_write_pending(seq));
220  // (5) - this release-store synchronizes-with acquire-CAS (4)
221  // and the acquire-load (1, 2, 3)
222  _seq.store(seq + 1, std::memory_order_release);
223 }
224 
225 template <class T, class... Policies>
226 void seqlock<T, Policies...>::read_data(T& dest, const storage_t& src) const {
227  copy_t* pdest = reinterpret_cast<copy_t*>(&dest);
228  copy_t* pend = pdest + (sizeof(T) / sizeof(copy_t));
229  const std::atomic<copy_t>* psrc = reinterpret_cast<const std::atomic<copy_t>*>(&src);
230  for (; pdest != pend; ++psrc, ++pdest) {
231  *pdest = psrc->load(std::memory_order_relaxed);
232  }
233  // (6) - this acquire-fence synchronizes-with the release-fence (7)
234  std::atomic_thread_fence(std::memory_order_acquire);
235 
236  // Effectively this fence transforms the previous relaxed-loads into acquire-loads. This
237  // is necessary to enforce an order with the subsequent load of _seq, so that these
238  // relaxed-loads cannot be reordered with the load on _seq. This also implies that if
239  // one of these relaxed-loads returns a new value written by a concurrent update operation,
240  // the fences synchronize with each other, so it is guaranteed that the subsequent load on
241  // _seq "sees" the new sequence value and the load operation will perform a retry.
242 }
243 
244 template <class T, class... Policies>
245 void seqlock<T, Policies...>::store_data(const T& src, storage_t& dest) {
246  // (7) - this release-fence synchronizes-with the acquire-fence (6)
247  std::atomic_thread_fence(std::memory_order_release);
248 
249  const copy_t* psrc = reinterpret_cast<const copy_t*>(&src);
250  const copy_t* pend = psrc + (sizeof(T) / sizeof(copy_t));
251  std::atomic<copy_t>* pdest = reinterpret_cast<std::atomic<copy_t>*>(&dest);
252  for (; psrc != pend; ++psrc, ++pdest) {
253  pdest->store(*psrc, std::memory_order_relaxed);
254  }
255 }
256 
257 }
258 
259 #endif
xenium::seqlock
An implementation of the sequence lock (also often referred to as "sequential lock").
Definition: seqlock.hpp:61
xenium::seqlock::seqlock
seqlock()=default
Constructs an empty seqlock; no value is stored until the first store/update.
xenium::seqlock::load
T load() const
Reads the current value.
Definition: seqlock.hpp:152
xenium::seqlock::update
void update(Func func)
Updates the stored value with the given functor.
Definition: seqlock.hpp:184
xenium::seqlock::seqlock
seqlock(Args &&... args)
Constructs an object of type T using args as the parameter list for the constructor of T.
Definition: seqlock.hpp:92
xenium::seqlock::seqlock
seqlock(const T &data)
Constructs an object of type T via copy construction.
Definition: seqlock.hpp:81
xenium::policy::slots
Policy to configure the number of slots used in seqlock.
Definition: seqlock.hpp:25
xenium::seqlock::store
void store(const T &value)
Stores the given value.
Definition: seqlock.hpp:195