xenium
Loading...
Searching...
No Matches
hazard_eras.hpp
1//
2// Copyright (c) 2018-2020 Manuel Pöter.
3// Licensed under the MIT License. See LICENSE file in the project root for full license information.
4//
5
6#ifndef XENIUM_HAZARD_ERAS_HPP
7#define XENIUM_HAZARD_ERAS_HPP
8
#include <xenium/reclamation/detail/concurrent_ptr.hpp>
#include <xenium/reclamation/detail/guard_ptr.hpp>
#include <xenium/reclamation/detail/deletable_object.hpp>
#include <xenium/reclamation/detail/thread_block_list.hpp>
#include <xenium/reclamation/detail/allocation_tracker.hpp>

#include <xenium/acquire_guard.hpp>
#include <xenium/parameter.hpp>
#include <xenium/policy.hpp>

#include <atomic>
#include <cstdint>
#include <memory>
#include <stdexcept>
21
22namespace xenium { namespace reclamation {
23
  /**
   * @brief This exception is thrown if a thread tries to allocate a new hazard era,
   * but the number of available hazard eras is exceeded.
   */
  class bad_hazard_era_alloc : public std::runtime_error {
    using std::runtime_error::runtime_error;
  };
32
  namespace detail {
    struct deletable_object_with_eras;

    template <class Strategy, class Derived>
    struct basic_he_thread_control_block;

    /**
     * @brief Common base for the hazard-era allocation strategies.
     *
     * @tparam K_ number of hazard eras per thread control block (exposed as `K`;
     *   presumably the per-thread era slots — confirm against the control block impl)
     * @tparam A,B coefficients of the retired-nodes threshold:
     *   `A * number_of_active_hazard_eras() + B`
     * @tparam ThreadControlBlock the control-block template (static or dynamic)
     *   instantiated with this strategy.
     */
    template <size_t K_, size_t A, size_t B, template <class> class ThreadControlBlock>
    struct generic_hazard_era_allocation_strategy
    {
      static constexpr size_t K = K_;

      /// Number of retired nodes a thread may accumulate before reclamation
      /// is attempted; scales linearly with the number of active hazard eras.
      static size_t retired_nodes_threshold() {
        return A * number_of_active_hazard_eras() + B;
      }

      /// Current global count of active hazard eras (relaxed read of the counter).
      static size_t number_of_active_hazard_eras() {
        return number_of_active_hes.load(std::memory_order_relaxed);
      }

      using thread_control_block = ThreadControlBlock<generic_hazard_era_allocation_strategy>;
    private:
      // The control blocks maintain the global era counter below.
      friend thread_control_block;
      friend basic_he_thread_control_block<generic_hazard_era_allocation_strategy, thread_control_block>;

      inline static std::atomic<size_t> number_of_active_hes{0};
    };

    template <class Strategy>
    struct static_he_thread_control_block;

    template <class Strategy>
    struct dynamic_he_thread_control_block;
  }
66
  namespace he_allocation {
    /**
     * @brief Allocation strategy using a static (fixed) per-thread set of hazard
     * eras, backed by `static_he_thread_control_block`.
     *
     * @tparam K number of hazard eras per thread (defaults to 2)
     * @tparam A,B coefficients of the retired-nodes threshold `A * active_eras + B`
     */
    template <size_t K = 2, size_t A = 2, size_t B = 100>
    struct static_strategy :
      detail::generic_hazard_era_allocation_strategy<K, A, B, detail::static_he_thread_control_block> {};

    /**
     * @brief Allocation strategy that can grow the per-thread set of hazard eras
     * dynamically, backed by `dynamic_he_thread_control_block`.
     *
     * @tparam K initial number of hazard eras per thread (defaults to 2)
     * @tparam A,B coefficients of the retired-nodes threshold `A * active_eras + B`
     */
    template <size_t K = 2, size_t A = 2, size_t B = 100>
    struct dynamic_strategy :
      detail::generic_hazard_era_allocation_strategy<K, A, B, detail::dynamic_he_thread_control_block> {};
  }
112
  /**
   * @brief Traits for the hazard_eras reclamation scheme.
   *
   * @tparam AllocationStrategy the hazard-era allocation strategy;
   *   defaults to `he_allocation::static_strategy<3>`.
   */
  template <class AllocationStrategy = he_allocation::static_strategy<3>>
  struct hazard_era_traits {
    using allocation_strategy = AllocationStrategy;

    /// Rebinds these traits with the given policies applied
    /// (e.g. `policy::allocation_strategy<...>`).
    template <class... Policies>
    using with = hazard_era_traits<
      parameter::type_param_t<policy::allocation_strategy, AllocationStrategy, Policies...>
    >;
  };
122
141 template <class Traits = hazard_era_traits<>>
143 {
144 using allocation_strategy = typename Traits::allocation_strategy;
145 using thread_control_block = typename allocation_strategy::thread_control_block;
146 friend detail::basic_he_thread_control_block<allocation_strategy, thread_control_block>;
147
148 template <class T, class MarkedPtr>
149 class guard_ptr;
150
151 public:
162 template <class... Policies>
163 using with = hazard_eras<typename Traits::template with<Policies...>>;
164
165 template <class T, std::size_t N = 0, class Deleter = std::default_delete<T>>
166 class enable_concurrent_ptr;
167
168 class region_guard {};
169
170 template <class T, std::size_t N = T::number_of_mark_bits>
171 using concurrent_ptr = detail::concurrent_ptr<T, N, guard_ptr>;
172
173 ALLOCATION_TRACKER;
174
175 private:
176 struct thread_data;
177
178 friend struct detail::deletable_object_with_eras;
179
180 using era_t = uint64_t;
181 inline static std::atomic<era_t> era_clock{1};
182 inline static detail::thread_block_list<thread_control_block, detail::deletable_object_with_eras> global_thread_block_list{};
183 static thread_data& local_thread_data();
184
185 ALLOCATION_TRACKING_FUNCTIONS;
186 };
187
  /// Base class for objects that are reclaimed via hazard_eras. Records the
  /// value of the global era clock at construction time in `construction_era`
  /// (member inherited from `deletable_object_with_eras`).
  template <class Traits>
  template <class T, std::size_t N, class Deleter>
  class hazard_eras<Traits>::enable_concurrent_ptr :
    public detail::deletable_object_impl<T, Deleter, detail::deletable_object_with_eras>,
    private detail::tracked_object<hazard_eras>
  {
  public:
    static constexpr std::size_t number_of_mark_bits = N;
  protected:
    // Capture the era in which this object was created (relaxed load is
    // sufficient here; ordering is presumably established elsewhere — confirm
    // against the impl file).
    enable_concurrent_ptr() noexcept {
      this->construction_era = era_clock.load(std::memory_order_relaxed);
    }
    enable_concurrent_ptr(const enable_concurrent_ptr&) noexcept = default;
    enable_concurrent_ptr(enable_concurrent_ptr&&) noexcept = default;
    enable_concurrent_ptr& operator=(const enable_concurrent_ptr&) noexcept = default;
    enable_concurrent_ptr& operator=(enable_concurrent_ptr&&) noexcept = default;
    ~enable_concurrent_ptr() noexcept = default;
  private:
    friend detail::deletable_object_impl<T, Deleter>;

    template <class, class>
    friend class guard_ptr;
  };
211
  /// Guard pointer for the hazard_eras scheme. While engaged it holds a pointer
  /// to a `hazard_era` entry in the owning thread's control block (`he` member),
  /// which keeps the guarded object from being reclaimed.
  template <class Traits>
  template <class T, class MarkedPtr>
  class hazard_eras<Traits>::guard_ptr : public detail::guard_ptr<T, MarkedPtr, guard_ptr<T, MarkedPtr>>
  {
    using base = detail::guard_ptr<T, MarkedPtr, guard_ptr>;
    using Deleter = typename T::Deleter;
  public:
    guard_ptr() noexcept = default;

    // Guard a marked ptr.
    explicit guard_ptr(const MarkedPtr& p);
    guard_ptr(const guard_ptr& p);
    guard_ptr(guard_ptr&& p) noexcept;

    guard_ptr& operator=(const guard_ptr& p) noexcept;
    guard_ptr& operator=(guard_ptr&& p) noexcept;

    // Atomically take snapshot of p, and *if* it points to unreclaimed object, acquire shared ownership of it.
    void acquire(const concurrent_ptr<T>& p, std::memory_order order = std::memory_order_seq_cst);

    // Like acquire, but quit early if a snapshot != expected.
    bool acquire_if_equal(const concurrent_ptr<T>& p,
                          const MarkedPtr& expected,
                          std::memory_order order = std::memory_order_seq_cst);

    // Release ownership. Postcondition: get() == nullptr.
    void reset() noexcept;

    // Reset. Deleter d will be applied some time after all owners release their ownership.
    void reclaim(Deleter d = Deleter()) noexcept;

  private:
    using enable_concurrent_ptr = hazard_eras::enable_concurrent_ptr<T, MarkedPtr::number_of_mark_bits, Deleter>;

    friend base;
    void do_swap(guard_ptr& g) noexcept;

    // Hazard era entry currently protecting the object; nullptr when disengaged.
    typename thread_control_block::hazard_era* he = nullptr;
  };
251
252 namespace detail {
253 struct deletable_object_with_eras
254 {
255 virtual void delete_self() = 0;
256 deletable_object_with_eras* next = nullptr;
257 protected:
258 virtual ~deletable_object_with_eras() = default;
259 using era_t = size_t;
260 era_t construction_era{};
261 era_t retirement_era{};
262 template <class>
263 friend class hazard_eras;
264
265#ifdef __clang__
266#pragma clang diagnostic push
267// clang does not support dependent nested types for friend class declaration
268#pragma clang diagnostic ignored "-Wunsupported-friend"
269#endif
270 template <class T>
271 friend struct hazard_eras<T>::thread_data;
272#ifdef __clang__
273#pragma clang diagnostic pop
274#endif
275 };
276 }
277}}
278
279#define HAZARD_ERAS_IMPL
280#include <xenium/reclamation/impl/hazard_eras.hpp>
281#undef HAZARD_ERAS_IMPL
282
283#endif
This exception is thrown if a thread tries to allocate a new hazard era, but the number of available hazard eras is exceeded.
Definition hazard_eras.hpp:29
T must be derived from enable_concurrent_ptr<T>. D is a deleter.
Definition concurrent_ptr.hpp:21
An implementation of the hazard eras scheme proposed by Ramalhete and Correia [RC17].
Definition hazard_eras.hpp:143
Slim wrapper around std::hash with specialization for pointer types.
Definition hash.hpp:25