quiescent_state_based.hpp
//
// Copyright (c) 2018-2020 Manuel Pöter.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//

#ifndef QUIESCENT_STATE_BASED_IMPL
#error "This is an impl file and must not be included directly!"
#endif

#include <xenium/reclamation/detail/orphan.hpp>
#include <xenium/detail/port.hpp>

#include <algorithm>

namespace xenium { namespace reclamation {

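  // Per-thread entry in the global thread list. local_epoch caches the epoch this
  // thread has most recently announced; other threads read it to decide whether the
  // global epoch may be advanced.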
  struct quiescent_state_based::thread_control_block :
    detail::thread_block_list<thread_control_block>::entry
  {
    std::atomic<unsigned> local_epoch;
  };

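  // Per-thread reclamation state: the region nesting counter, one retire list per
  // epoch, and the control block this thread occupies in the global thread list.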
  struct quiescent_state_based::thread_data
  {
    ~thread_data()
    {
      if (control_block == nullptr)
        return; // no control_block -> nothing to do

      // we can avoid creating an orphan in case we have no retired nodes left.
      if (std::any_of(retire_lists.begin(), retire_lists.end(), [](auto p) { return p != nullptr; }))
      {
        // global_epoch - 1 (mod number_epochs) guarantees a full cycle, making sure no
        // other thread may still have a reference to an object in one of the retire lists.
        auto target_epoch = (global_epoch.load(std::memory_order_relaxed) + number_epochs - 1) % number_epochs;
        assert(target_epoch < number_epochs);
        global_thread_block_list.abandon_retired_nodes(new detail::orphan<number_epochs>(target_epoch, retire_lists));
      }

      global_thread_block_list.release_entry(control_block);
      control_block = nullptr;
    }

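    // enter_region/leave_region maintain a nesting counter; only when the outermost
    // region is left does the thread pass through a quiescent state.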
    void enter_region()
    {
      ensure_has_control_block();
      ++region_entries;
    }

    void leave_region()
    {
      if (--region_entries == 0)
        quiescent_state();
    }

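    // Retire p into the list of the epoch this thread currently announces; it is
    // reclaimed once the global epoch has made a full cycle back to that slot.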
    void add_retired_node(detail::deletable_object* p)
    {
      add_retired_node(p, control_block->local_epoch.load(std::memory_order_relaxed));
    }

  private:
    void ensure_has_control_block()
    {
      if (control_block == nullptr)
      {
        control_block = global_thread_block_list.acquire_entry();
        auto epoch = global_epoch.load(std::memory_order_relaxed);
        do {
          control_block->local_epoch.store(epoch, std::memory_order_relaxed);

          // (1) - this acq_rel-CAS synchronizes-with the acquire-load (2)
          //       and the acq_rel-CAS (5)
        } while (!global_epoch.compare_exchange_weak(epoch, epoch,
                                                     std::memory_order_acq_rel,
                                                     std::memory_order_relaxed));
      }
    }

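    // A quiescent state: if this thread has already caught up to the current global
    // epoch, it tries to advance the epoch (which only succeeds once no active thread
    // still announces the previous one). Afterwards it announces the (possibly new)
    // epoch and reclaims the retire list of the previous incarnation of that slot.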
    void quiescent_state()
    {
      // (2) - this acquire-load synchronizes-with the acq_rel-CAS (1, 5)
      auto epoch = global_epoch.load(std::memory_order_acquire);

      if (control_block->local_epoch.load(std::memory_order_relaxed) == epoch)
      {
        const auto new_epoch = (epoch + 1) % number_epochs;
        if (!try_update_epoch(epoch, new_epoch))
          return;

        epoch = new_epoch;
      }

      // we either just updated the global_epoch or we are observing a new epoch from some other thread
      // either way - we can reclaim all the objects from the old 'incarnation' of this epoch

      // (3) - this release-store synchronizes-with the acquire-fence (4)
      control_block->local_epoch.store(epoch, std::memory_order_release);
      detail::delete_objects(retire_lists[epoch]);
    }

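    // Prepend p to the intrusive retire list of the given epoch slot.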
    void add_retired_node(detail::deletable_object* p, size_t epoch)
    {
      assert(epoch < number_epochs);
      p->next = retire_lists[epoch];
      retire_lists[epoch] = p;
    }

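    // Try to advance the global epoch from curr_epoch to new_epoch. The update is only
    // permitted once no active thread still announces the previous epoch, since such a
    // thread might still hold references to objects retired in that epoch.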
    bool try_update_epoch(unsigned curr_epoch, unsigned new_epoch)
    {
      const auto old_epoch = (curr_epoch + number_epochs - 1) % number_epochs;
      auto prevents_update = [old_epoch](const thread_control_block& data)
      {
        // TSan does not support explicit fences, so we cannot rely on the acquire-fence (4)
        // but have to perform an acquire-load here to avoid false positives.
        constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
        return data.local_epoch.load(memory_order) == old_epoch &&
               data.is_active(memory_order);
      };

      // If any thread hasn't advanced to the current epoch, abort the attempt.
      bool cannot_update = std::any_of(global_thread_block_list.begin(), global_thread_block_list.end(),
                                       prevents_update);
      if (cannot_update)
        return false;

      if (global_epoch.load(std::memory_order_relaxed) == curr_epoch)
      {
        // (4) - this acquire-fence synchronizes-with the release-store (3)
        std::atomic_thread_fence(std::memory_order_acquire);

        // (5) - this acq_rel-CAS synchronizes-with the acquire-load (2)
        //       and the acq_rel-CAS (1)
        bool success = global_epoch.compare_exchange_strong(curr_epoch, new_epoch,
                                                            std::memory_order_acq_rel,
                                                            std::memory_order_relaxed);
        if (success)
          adopt_orphans();
      }

      // return true regardless of whether the CAS operation was successful or not
      // it's not important that THIS thread updated the epoch, only that it got updated in any case
      return true;
    }

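    // Take over the retire lists abandoned by terminated threads; each orphan records
    // the epoch slot its nodes belong to.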
    void adopt_orphans()
    {
      auto cur = global_thread_block_list.adopt_abandoned_retired_nodes();
      for (detail::deletable_object* next = nullptr; cur != nullptr; cur = next)
      {
        next = cur->next;
        cur->next = nullptr;
        add_retired_node(cur, static_cast<detail::orphan<number_epochs>*>(cur)->target_epoch);
      }
    }

    unsigned region_entries = 0;
    thread_control_block* control_block = nullptr;
    std::array<detail::deletable_object*, number_epochs> retire_lists = {};

    friend class quiescent_state_based;
    ALLOCATION_COUNTER(quiescent_state_based);
  };

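  // A region_guard brackets a critical region for the calling thread: while it is
  // alive the thread never reports a quiescent state, so the global epoch cannot
  // complete a full cycle and objects retired by other threads stay valid for this
  // thread.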
  inline quiescent_state_based::region_guard::region_guard() noexcept
  {
    local_thread_data().enter_region();
  }

  inline quiescent_state_based::region_guard::~region_guard() noexcept
  {
    local_thread_data().leave_region();
  }

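  // Illustrative usage sketch (not part of this file): readers typically protect a
  // whole traversal with a region_guard, or individual objects with a guard_ptr:
  //
  //   using reclaimer = xenium::reclamation::quiescent_state_based;
  //   {
  //     reclaimer::region_guard guard;
  //     // ... traverse a data structure parameterized with this reclamation scheme;
  //     // nodes retired by other threads remain valid until the guard is destroyed.
  //   }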
  template <class T, class MarkedPtr>
  quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) noexcept :
    base(p)
  {
    if (this->ptr)
      local_thread_data().enter_region();
  }

  template <class T, class MarkedPtr>
  quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(const guard_ptr& p) noexcept :
    guard_ptr(MarkedPtr(p))
  {}

  template <class T, class MarkedPtr>
  quiescent_state_based::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept :
    base(p.ptr)
  {
    p.ptr.reset();
  }

  template <class T, class MarkedPtr>
  typename quiescent_state_based::template guard_ptr<T, MarkedPtr>&
  quiescent_state_based::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) noexcept
  {
    if (&p == this)
      return *this;

    reset();
    this->ptr = p.ptr;
    if (this->ptr)
      local_thread_data().enter_region();

    return *this;
  }

  template <class T, class MarkedPtr>
  typename quiescent_state_based::template guard_ptr<T, MarkedPtr>&
  quiescent_state_based::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept
  {
    if (&p == this)
      return *this;

    reset();
    this->ptr = std::move(p.ptr);
    p.ptr.reset();

    return *this;
  }

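  // acquire: protect the pointer currently stored in p. The relaxed pre-check avoids
  // entering a region for null pointers; the region is entered before the actual load,
  // so the loaded object cannot be reclaimed while the guard is held.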
  template <class T, class MarkedPtr>
  void quiescent_state_based::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p,
                                                               std::memory_order order) noexcept
  {
    if (p.load(std::memory_order_relaxed) == nullptr)
    {
      reset();
      return;
    }

    if (!this->ptr)
      local_thread_data().enter_region();
    // (6) - this load operation potentially synchronizes-with any release operation on p.
    this->ptr = p.load(order);
    if (!this->ptr)
      local_thread_data().leave_region();
  }

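  // acquire_if_equal: like acquire, but only keeps the pointer if it still equals
  // `expected`; returns whether the reloaded value matched.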
  template <class T, class MarkedPtr>
  bool quiescent_state_based::guard_ptr<T, MarkedPtr>::acquire_if_equal(
    const concurrent_ptr<T>& p, const MarkedPtr& expected, std::memory_order order) noexcept
  {
    auto actual = p.load(std::memory_order_relaxed);
    if (actual == nullptr || actual != expected)
    {
      reset();
      return actual == expected;
    }

    if (!this->ptr)
      local_thread_data().enter_region();
    // (7) - this load operation potentially synchronizes-with any release operation on p.
    this->ptr = p.load(order);
    if (!this->ptr || this->ptr != expected)
    {
      local_thread_data().leave_region();
      this->ptr.reset();
    }

    return this->ptr == expected;
  }

  template <class T, class MarkedPtr>
  void quiescent_state_based::guard_ptr<T, MarkedPtr>::reset() noexcept
  {
    if (this->ptr)
      local_thread_data().leave_region();
    this->ptr.reset();
  }

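  // reclaim: attach the deleter and retire the object into the current epoch's list;
  // deletion is deferred until a full epoch cycle guarantees no thread still holds a
  // reference to it.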
  template <class T, class MarkedPtr>
  void quiescent_state_based::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept
  {
    this->ptr->set_deleter(std::move(d));
    local_thread_data().add_retired_node(this->ptr.get());
    reset();
  }

  inline quiescent_state_based::thread_data& quiescent_state_based::local_thread_data()
  {
    // workaround for a GCC issue with multiple definitions of __tls_guard
    static thread_local thread_data local_thread_data;
    return local_thread_data;
  }

#ifdef TRACK_ALLOCATIONS
  inline void quiescent_state_based::count_allocation()
  { local_thread_data().allocation_counter.count_allocation(); }

  inline void quiescent_state_based::count_reclamation()
  { local_thread_data().allocation_counter.count_reclamation(); }
#endif
}}