6 #ifndef QUIESCENT_STATE_BASED_IMPL
7 #error "This is an impl file and must not be included directly!"
#include <xenium/reclamation/detail/orphan.hpp>
#include <xenium/detail/port.hpp>

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
15 namespace xenium {
namespace reclamation {
17 struct quiescent_state_based::thread_control_block :
18 detail::thread_block_list<thread_control_block>::entry
20 std::atomic<unsigned> local_epoch;
23 struct quiescent_state_based::thread_data
27 if (control_block ==
nullptr)
31 if (std::any_of(retire_lists.begin(), retire_lists.end(), [](
auto p) { return p != nullptr; }))
35 auto target_epoch = (global_epoch.load(std::memory_order_relaxed) + number_epochs - 1) % number_epochs;
36 assert(target_epoch < number_epochs);
37 global_thread_block_list.abandon_retired_nodes(
new detail::orphan<number_epochs>(target_epoch, retire_lists));
40 global_thread_block_list.release_entry(control_block);
41 control_block =
nullptr;
46 ensure_has_control_block();
52 if (--region_entries == 0)
56 void add_retired_node(detail::deletable_object* p)
58 add_retired_node(p, control_block->local_epoch.load(std::memory_order_relaxed));
62 void ensure_has_control_block()
64 if (control_block ==
nullptr)
66 control_block = global_thread_block_list.acquire_entry();
67 auto epoch = global_epoch.load(std::memory_order_relaxed);
69 control_block->local_epoch.store(epoch, std::memory_order_relaxed);
73 }
while (!global_epoch.compare_exchange_weak(epoch, epoch,
74 std::memory_order_acq_rel,
75 std::memory_order_relaxed));
79 void quiescent_state()
82 auto epoch = global_epoch.load(std::memory_order_acquire);
84 if (control_block->local_epoch.load(std::memory_order_relaxed) == epoch)
86 const auto new_epoch = (epoch + 1) % number_epochs;
87 if (!try_update_epoch(epoch, new_epoch))
97 control_block->local_epoch.store(epoch, std::memory_order_release);
98 detail::delete_objects(retire_lists[epoch]);
101 void add_retired_node(detail::deletable_object* p,
size_t epoch)
103 assert(epoch < number_epochs);
104 p->next = retire_lists[epoch];
105 retire_lists[epoch] = p;
108 bool try_update_epoch(
unsigned curr_epoch,
unsigned new_epoch)
110 const auto old_epoch = (curr_epoch + number_epochs - 1) % number_epochs;
111 auto prevents_update = [old_epoch](
const thread_control_block& data)
115 constexpr
auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
116 return data.local_epoch.load(memory_order) == old_epoch &&
117 data.is_active(memory_order);
121 bool cannot_update = std::any_of(global_thread_block_list.begin(), global_thread_block_list.end(),
126 if (global_epoch.load(std::memory_order_relaxed) == curr_epoch)
129 std::atomic_thread_fence(std::memory_order_acquire);
133 bool success = global_epoch.compare_exchange_strong(curr_epoch, new_epoch,
134 std::memory_order_acq_rel,
135 std::memory_order_relaxed);
147 auto cur = global_thread_block_list.adopt_abandoned_retired_nodes();
148 for (detail::deletable_object* next =
nullptr; cur !=
nullptr; cur = next)
152 add_retired_node(cur,
static_cast<detail::orphan<number_epochs>*
>(cur)->target_epoch);
156 unsigned region_entries = 0;
157 thread_control_block* control_block =
nullptr;
158 std::array<detail::deletable_object*, number_epochs> retire_lists = {};
160 friend class quiescent_state_based;
161 ALLOCATION_COUNTER(quiescent_state_based);
164 inline quiescent_state_based::region_guard::region_guard() noexcept
166 local_thread_data().enter_region();
169 inline quiescent_state_based::region_guard::~region_guard() noexcept
171 local_thread_data().leave_region();
174 template <
class T,
class MarkedPtr>
175 quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(
const MarkedPtr& p) noexcept :
179 local_thread_data().enter_region();
182 template <
class T,
class MarkedPtr>
183 quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(
const guard_ptr& p) noexcept :
184 guard_ptr(MarkedPtr(p))
187 template <
class T,
class MarkedPtr>
188 quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept :
194 template <
class T,
class MarkedPtr>
195 typename quiescent_state_based::template guard_ptr<T, MarkedPtr>&
196 quiescent_state_based::guard_ptr<T, MarkedPtr>::operator=(
const guard_ptr& p) noexcept
204 local_thread_data().enter_region();
209 template <
class T,
class MarkedPtr>
210 typename quiescent_state_based::template guard_ptr<T, MarkedPtr>&
211 quiescent_state_based::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept
217 this->ptr = std::move(p.ptr);
223 template <
class T,
class MarkedPtr>
224 void quiescent_state_based::guard_ptr<T, MarkedPtr>::acquire(
const concurrent_ptr<T>& p,
225 std::memory_order order) noexcept
227 if (p.load(std::memory_order_relaxed) ==
nullptr)
234 local_thread_data().enter_region();
236 this->ptr = p.load(order);
238 local_thread_data().leave_region();
241 template <
class T,
class MarkedPtr>
242 bool quiescent_state_based::guard_ptr<T, MarkedPtr>::acquire_if_equal(
243 const concurrent_ptr<T>& p,
const MarkedPtr& expected, std::memory_order order) noexcept
245 auto actual = p.load(std::memory_order_relaxed);
246 if (actual ==
nullptr || actual != expected)
249 return actual == expected;
253 local_thread_data().enter_region();
255 this->ptr = p.load(order);
256 if (!this->ptr || this->ptr != expected)
258 local_thread_data().leave_region();
262 return this->ptr == expected;
265 template <
class T,
class MarkedPtr>
266 void quiescent_state_based::guard_ptr<T, MarkedPtr>::reset() noexcept
269 local_thread_data().leave_region();
273 template <
class T,
class MarkedPtr>
274 void quiescent_state_based::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept
276 this->ptr->set_deleter(std::move(d));
277 local_thread_data().add_retired_node(this->ptr.get());
281 inline quiescent_state_based::thread_data& quiescent_state_based::local_thread_data()
284 static thread_local thread_data local_thread_data;
285 return local_thread_data;
288 #ifdef TRACK_ALLOCATIONS
289 inline void quiescent_state_based::count_allocation()
290 { local_thread_data().allocation_counter.count_allocation(); }
292 inline void quiescent_state_based::count_reclamation()
293 { local_thread_data().allocation_counter.count_reclamation(); }