thread_block_list.hpp
//
// Copyright (c) 2018-2020 Manuel Pöter.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//

#ifndef XENIUM_DETAIL_THREAD_BLOCK_LIST_HPP
#define XENIUM_DETAIL_THREAD_BLOCK_LIST_HPP

#include <atomic>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4324) // structure was padded due to alignment specifier
#endif

namespace xenium { namespace reclamation { namespace detail {

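// thread_block_list maintains a global, lock-free, singly linked list of per-thread
// entries. Entries are only ever prepended and never removed; a terminating thread
// merely marks its entry as "free" so that a later thread can adopt and reuse it.
// Typical usage (sketch): a reclamation scheme calls acquire_entry() for the current
// thread, scans all entries via begin()/end(), and calls release_entry() on thread exit.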
template <typename T, typename DeletableObject = detail::deletable_object>
class thread_block_list
{
  enum class entry_state {
    free,
    inactive,
    active
  };
public:
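  // An entry represents one (current or former) participating thread. Its state encodes
  // ownership: "active" and "inactive" entries are owned by some thread, while "free"
  // entries are unowned and can be adopted via try_adopt.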
  struct entry
  {
    entry() :
      next_entry(nullptr),
      state(entry_state::active)
    {}

    // Normally this load operation can use relaxed semantics, as all reclamation schemes
    // that use it have an acquire-fence that is sequenced-after calling is_active.
    // However, TSan does not support acquire-fences, so in order to avoid false
    // positives we have to allow other memory orders as well.
    bool is_active(std::memory_order memory_order = std::memory_order_relaxed) const {
      return state.load(memory_order) == entry_state::active;
    }

    void abandon() {
      assert(is_active());
      // (1) - this release-store synchronizes-with the acquire-CAS (2)
      //       or any acquire-fence that is sequenced-after calling is_active.
      state.store(entry_state::free, std::memory_order_release);
    }

    void activate() {
      assert(state.load(std::memory_order_relaxed) == entry_state::inactive);
      state.store(entry_state::active, std::memory_order_release);
    }

  private:
    friend class thread_block_list;

    bool try_adopt(entry_state initial_state)
    {
      if (state.load(std::memory_order_relaxed) == entry_state::free)
      {
        auto expected = entry_state::free;
        // (2) - this acquire-CAS synchronizes-with the release-store (1)
        return state.compare_exchange_strong(expected, initial_state, std::memory_order_acquire);
      }
      return false;
    }

    // next_entry is only set once when it gets inserted into the list and is never changed afterwards
    // -> therefore it does not have to be atomic
    T* next_entry;

    // state is used to manage ownership and active status of entries
    std::atomic<entry_state> state;
  };

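  // Forward iterator over all entries in the list. Since entries are never removed,
  // iteration is safe even while other threads concurrently prepend new entries.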
  class iterator
  {
    T* ptr = nullptr;

    explicit iterator(T* ptr) : ptr(ptr) {}
  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = T;
    using difference_type = std::ptrdiff_t;
    using pointer = T*;
    using reference = T&;

    iterator() = default;

    void swap(iterator& other) noexcept
    {
      std::swap(ptr, other.ptr);
    }

    iterator& operator++ ()
    {
      assert(ptr != nullptr);
      ptr = ptr->next_entry;
      return *this;
    }

    iterator operator++ (int)
    {
      assert(ptr != nullptr);
      iterator tmp(*this);
      ptr = ptr->next_entry;
      return tmp;
    }

    bool operator == (const iterator& rhs) const
    {
      return ptr == rhs.ptr;
    }

    bool operator != (const iterator& rhs) const
    {
      return ptr != rhs.ptr;
    }

    T& operator* () const
    {
      assert(ptr != nullptr);
      return *ptr;
    }

    T* operator-> () const
    {
      assert(ptr != nullptr);
      return ptr;
    }

    friend class thread_block_list;
  };

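  // Returns an entry in "active" state for the calling thread, either by adopting an
  // existing free entry or by allocating a new one and prepending it to the list.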
  T* acquire_entry()
  {
    return adopt_or_create_entry(entry_state::active);
  }

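  // Like acquire_entry, but the returned entry starts in "inactive" state and has to be
  // activated via entry::activate before is_active reports it as active.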
  T* acquire_inactive_entry()
  {
    return adopt_or_create_entry(entry_state::inactive);
  }

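  // Releases ownership of the given entry by marking it as "free"; the entry remains in
  // the list and may later be adopted by another thread.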
  void release_entry(T* entry)
  {
    entry->abandon();
  }

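  // Iteration starts at the list head as observed by begin(); entries prepended after
  // that load are not visited. Since entries are never removed, the traversal itself is
  // safe under concurrent insertions.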
  iterator begin()
  {
    // (3) - this acquire-load synchronizes-with the release-CAS (6)
    return iterator{head.load(std::memory_order_acquire)};
  }

  iterator end() { return iterator{}; }

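  // Prepends the given chain of retired nodes (linked via their next pointers) to the
  // global list of abandoned nodes so that another thread can reclaim them later.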
  void abandon_retired_nodes(DeletableObject* obj)
  {
    auto last = obj;
    auto next = last->next;
    while (next)
    {
      last = next;
      next = last->next;
    }

    auto h = abandoned_retired_nodes.load(std::memory_order_relaxed);
    do
    {
      last->next = h;
      // (4) - this release-CAS synchronizes-with the acquire-exchange (5)
    } while (!abandoned_retired_nodes.compare_exchange_weak(h, obj,
        std::memory_order_release, std::memory_order_relaxed));
  }

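  // Takes over the complete list of abandoned retired nodes (if any), leaving the global
  // list empty. Returns nullptr if there is nothing to adopt.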
  DeletableObject* adopt_abandoned_retired_nodes()
  {
    if (abandoned_retired_nodes.load(std::memory_order_relaxed) == nullptr)
      return nullptr;

    // (5) - this acquire-exchange synchronizes-with the release-CAS (4)
    return abandoned_retired_nodes.exchange(nullptr, std::memory_order_acquire);
  }

private:
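  // Prepends the given node to the global list via a CAS loop on head.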
  void add_entry(T* node)
  {
    auto h = head.load(std::memory_order_relaxed);
    do
    {
      node->next_entry = h;
      // (6) - this release-CAS synchronizes-with the acquire-loads (3, 7)
    } while (!head.compare_exchange_weak(h, node, std::memory_order_release, std::memory_order_relaxed));
  }

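  // Scans the list for a free entry that can be adopted; if none is found, creates a new
  // entry with the requested initial state and inserts it into the list.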
  T* adopt_or_create_entry(entry_state initial_state)
  {
    static_assert(std::is_base_of<entry, T>::value, "T must derive from entry.");

    // (7) - this acquire-load synchronizes-with the release-CAS (6)
    T* result = head.load(std::memory_order_acquire);
    while (result)
    {
      if (result->try_adopt(initial_state))
        return result;

      result = result->next_entry;
    }

    result = new T();
    result->state.store(initial_state, std::memory_order_relaxed);
    add_entry(result);
    return result;
  }

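  // Head of the global list of entries; entries are only ever prepended, never removed.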
  std::atomic<T*> head{nullptr};

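  // Retired nodes abandoned by terminated threads. Kept on its own cache line (assuming
  // 64-byte cache lines) to avoid false sharing with head.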
  alignas(64) std::atomic<DeletableObject*> abandoned_retired_nodes{nullptr};
};

}}}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif