1 /* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /*
3  *     Copyright 2010 Couchbase, Inc
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License");
6  *   you may not use this file except in compliance with the License.
7  *   You may obtain a copy of the License at
8  *
9  *       http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS,
13  *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  *   See the License for the specific language governing permissions and
15  *   limitations under the License.
16  */
17 
18 #ifndef SRC_ATOMIC_H_
19 #define SRC_ATOMIC_H_ 1
20 
21 #include "config.h"
22 
23 #include <atomic>
24 #include <memory>
25 
26 #include "locks.h"
27 #include "utility.h"
28 
/**
 * Atomically raise 'obj' to 'newValue' if (and only if) 'newValue' is
 * strictly greater than the currently stored value.
 *
 * On CAS failure compare_exchange_strong refreshes 'oldValue' with the
 * freshly observed value, so no explicit re-load is needed per iteration
 * (the original performed a redundant obj.load() after each failure).
 */
template <typename T>
void atomic_setIfBigger(std::atomic<T> &obj, const T &newValue) {
    T oldValue = obj.load();
    while (newValue > oldValue) {
        if (obj.compare_exchange_strong(oldValue, newValue)) {
            break;
        }
        // 'oldValue' now holds the value another thread stored; the loop
        // condition re-checks whether our value is still bigger.
    }
}
39 
/**
 * Atomically lower 'obj' to 'newValue' if (and only if) 'newValue' is
 * strictly less than the currently stored value.
 *
 * On CAS failure compare_exchange_strong refreshes 'oldValue' with the
 * freshly observed value, so no explicit re-load is needed per iteration
 * (the original performed a redundant obj.load() after each failure).
 */
template <typename T>
void atomic_setIfLess(std::atomic<T> &obj, const T &newValue) {
    T oldValue = obj.load();
    while (newValue < oldValue) {
        if (obj.compare_exchange_strong(oldValue, newValue)) {
            break;
        }
        // 'oldValue' now holds the value another thread stored; the loop
        // condition re-checks whether our value is still smaller.
    }
}
50 
/**
 * Atomically store 'newValue' into 'obj' unless the current value equals
 * 'badValue'.
 *
 * @return the value observed in 'obj' at the decision point: 'badValue'
 *         if that is what was seen (no store performed), otherwise the
 *         value that was replaced by 'newValue'.
 *
 * On CAS failure compare_exchange_strong refreshes 'oldValue' with the
 * freshly observed value, so the explicit obj.load() each iteration of
 * the original was redundant.
 */
template <typename T>
T atomic_swapIfNot(std::atomic<T> &obj, const T &badValue, const T &newValue) {
    T oldValue = obj.load();
    while (oldValue != badValue) {
        if (obj.compare_exchange_strong(oldValue, newValue)) {
            break;
        }
        // 'oldValue' was updated by the failed CAS; re-check it against
        // badValue before retrying.
    }
    return oldValue;
}
66 
/**
 * Atomic pointer.
 *
 * This does *not* make the item that's pointed to atomic: only the
 * pointer value itself is read/written atomically.
 *
 * (NULL replaced with nullptr for type-safety and for consistency with
 * the rest of this file, e.g. SingleThreadedRCPtr.)
 */
template <typename T>
class AtomicPtr : public std::atomic<T*> {
public:
    AtomicPtr(T *initial = nullptr) : std::atomic<T*>(initial) {}

    ~AtomicPtr() {}

    // Dereference helpers: each performs a single atomic load of the
    // pointer; the pointee itself is not protected.
    T *operator ->() const noexcept {
        return std::atomic<T*>::load();
    }

    T &operator *() const noexcept {
        return *std::atomic<T*>::load();
    }

    // True if the stored pointer is non-null.
    operator bool() const {
        return std::atomic<T*>::load() != nullptr;
    }

    // True if the stored pointer is null.
    bool operator !() const {
        return std::atomic<T*>::load() == nullptr;
    }
};
95 
/**
 * A lighter-weight, smaller lock than a mutex.
 *
 * This is primarily useful when contention is rare.
 *
 * Satisfies BasicLockable (lock()/unlock()), so it can be used with
 * std::lock_guard - see RCPtr::gimme()/swap() below.  Method bodies are
 * defined out-of-line (see comment below regarding the dtrace probe).
 */
class SpinLock {
public:
    // It seems like inlining the code caused the dtrace probe to
    // be optimized away ;)
    SpinLock();
    ~SpinLock();

    // Acquire the lock (defined out-of-line; presumably spins via
    // tryAcquire() until it succeeds - confirm in the .cc).
    void lock(void);
    // Release the lock; must only be called by the current holder.
    void unlock(void);

private:
    // Single, non-blocking acquisition attempt.
    bool tryAcquire(void);

    // Lock state flag; std::atomic_flag is guaranteed lock-free.
    std::atomic_flag lck;
    DISALLOW_COPY_AND_ASSIGN(SpinLock);
};
117 
118 template <class T> class RCPtr;
119 template <class S, class Pointer, class Deleter>
120 class SingleThreadedRCPtr;
121 
/**
 * A reference counted value (used by RCPtr and SingleThreadedRCPtr).
 *
 * The count lives in this base class; the smart-pointer friends adjust
 * it via _rc_incref()/_rc_decref().
 */
class RCValue {
public:
    RCValue() : _rc_refcount(0) {}
    // A copy is a brand-new object that no smart pointer owns yet, so
    // the count starts at zero rather than being copied.
    RCValue(const RCValue &) : _rc_refcount(0) {}
    // Assigning a new value into an existing object does not change how
    // many smart pointers own the destination, so the count is left
    // untouched.  Declaring this explicitly is necessary: the
    // std::atomic member would otherwise make the implicit
    // copy-assignment operator deleted, preventing subclasses from
    // being copy-assigned at all.
    RCValue& operator=(const RCValue&) { return *this; }
    ~RCValue() {}

private:
    template <class MyTT> friend class RCPtr;
    template <class MySS, class Pointer, class Deleter>
    friend class SingleThreadedRCPtr;
    // Increment and return the new count.
    int _rc_incref() const {
        return ++_rc_refcount;
    }

    // Decrement and return the new count; 0 means the last owner is gone.
    int _rc_decref() const {
        return --_rc_refcount;
    }

    // A get method to ensure that SingleThreadedRCPtr does not need to directly
    // refer to a RCValue.
    const RCValue& getRCValue() const {
        return *this;
    }

    // mutable + const inc/decref: the count is metadata, adjustable even
    // through pointers-to-const.
    mutable std::atomic<int> _rc_refcount;
};
151 
/**
 * Concurrent reference counted pointer.
 *
 * Unlike SingleThreadedRCPtr (below), the stored pointer is an AtomicPtr
 * and mutation goes through a per-instance SpinLock, so a single RCPtr
 * instance may be copied from / reset concurrently with other threads.
 * C must derive from RCValue (accessed via getRCValue()).
 */
template <class C>
class RCPtr {
public:
    // Wrap 'init' (may be NULL), taking one reference on it.
    RCPtr(C *init = NULL) : value(init) {
        if (init != NULL) {
            value->getRCValue()._rc_incref();
        }
    }

    // Copy: shares ownership; gimme() bumps the refcount under the lock.
    RCPtr(const RCPtr<C> &other) : value(other.gimme()) {}

    // Drop one reference; delete the pointee when the count reaches zero.
    ~RCPtr() {
        if (value && value->getRCValue()._rc_decref() == 0) {
            delete get();
        }
    }

    // Point at 'newValue' (may be NULL), releasing the reference held on
    // the previous pointee.  The new reference is taken *before* the
    // swap so the object can never be observed unreferenced.
    void reset(C *newValue = NULL) {
        if (newValue != NULL) {
            newValue->getRCValue()._rc_incref();
        }
        swap(newValue);
    }

    // Share ownership of other's pointee (gimme() takes the reference),
    // releasing the current one.
    void reset(const RCPtr<C> &other) {
        swap(other.gimme());
    }

    // safe for the lifetime of this instance
    C *get() const {
        return value;
    }

    RCPtr<C> & operator =(const RCPtr<C> &other) {
        reset(other);
        return *this;
    }

    // NOTE: no null checks - dereferencing an empty RCPtr is UB, as with
    // a raw pointer.
    C &operator *() const {
        return *value;
    }

    C *operator ->() const {
        return value;
    }

    bool operator! () const {
        return !value;
    }

    operator bool () const {
        return (bool)value;
    }

private:
    // Atomically snapshot 'value' and take a reference on it.  The lock
    // prevents a concurrent swap() from releasing (and deleting) the
    // pointee between our load and our incref.
    C *gimme() const {
        std::lock_guard<SpinLock> lh(lock);
        if (value) {
            value->getRCValue()._rc_incref();
        }
        return value;
    }

    // Install 'newValue' (whose reference the caller has already taken)
    // and release the reference held on the old pointee.  The decref and
    // possible delete happen outside the lock to keep the critical
    // section minimal.
    void swap(C *newValue) {
        C* tmp;
        {
            std::lock_guard<SpinLock> lh(lock);
            tmp = value.exchange(newValue);
        }
        if (tmp != NULL && tmp->getRCValue()._rc_decref() == 0) {
            delete tmp;
        }
    }

    AtomicPtr<C> value;
    mutable SpinLock lock; // exists solely for the purpose of implementing reset() safely
};
232 
233 /**
234  * Dynamic cast for RCPtr. Modelled on method of the same name for
235  * std::shared_ptr.
236  */
237 template <class T, class U>
dynamic_pointer_cast(const RCPtr<U>& r)238 RCPtr<T> dynamic_pointer_cast(const RCPtr<U>& r) {
239     T* p = dynamic_cast<T*>(r.get());
240     return p ? RCPtr<T>(p) : RCPtr<T>();
241 }
242 
/**
 * Single-threaded reference counted pointer.
 * "Single-threaded" means that the reference counted pointer should be accessed
 * by only one thread at any time or accesses to the reference counted pointer
 * by multiple threads should be synchronized by the external lock.
 *
 * Takes the following template parameters:
 * @tparam class T the class that the SingleThreadedRCPtr is pointing to.
 * @tparam class Pointer the pointer type that the SingleThreadedRCPtr
 * maintains a reference counter for.  It is defaulted to a T*, however can also
 * be a TaggedPtr<T>.
 * @tparam class Deleter the deleter function for deleting the object pointed to
 * by SingleThreadedRCPtr.  It defaults to the std::default_delete function
 * templated on class T.  However when the pointer type is a TaggedPtr<T> a
 * specialised delete function must be provided.
 */
template <class T, class Pointer = T*, class Deleter = std::default_delete<T>>
class SingleThreadedRCPtr {
public:
    using rcptr_type = SingleThreadedRCPtr<T, Pointer, Deleter>;

    // Wrap 'init' (may be nullptr), taking one reference on it.
    SingleThreadedRCPtr(Pointer init = nullptr) : value(init) {
        if (init != nullptr) {
            value->getRCValue()._rc_incref();
        }
    }

    // Copy construction - increases ref-count on object by 1.
    SingleThreadedRCPtr(const rcptr_type& other) : value(other.gimme()) {
    }

    // Move construction - reference count is unchanged.
    SingleThreadedRCPtr(rcptr_type&& other) : value(other.value) {
        other.value = nullptr;
    }

    // Converting copy construction (e.g. from pointer-to-derived) -
    // increases ref-count on object by 1.
    template <typename Y, typename P>
    SingleThreadedRCPtr(const SingleThreadedRCPtr<Y, P, Deleter>& other)
        : value(other.gimme()) {
    }

    // Take ownership from a unique_ptr; delegates to the Pointer ctor,
    // which takes the (first) reference on the released object.
    SingleThreadedRCPtr(std::unique_ptr<T>&& other)
        : SingleThreadedRCPtr(other.release()) {
    }

    // Drop one reference; destroy the pointee via Deleter when the count
    // reaches zero.
    ~SingleThreadedRCPtr() {
        if (value != nullptr && value->getRCValue()._rc_decref() == 0) {
            Deleter()(value);
        }
    }

    // Point at 'newValue' (may be nullptr), releasing the reference held
    // on the previous pointee.  The new reference is taken before the
    // old one is dropped.
    void reset(Pointer newValue = nullptr) {
        if (newValue != nullptr) {
            newValue->getRCValue()._rc_incref();
        }
        swap(newValue);
    }

    // Share ownership of other's pointee (gimme() takes the reference),
    // releasing the current one.
    void reset(const rcptr_type& other) {
        swap(other.gimme());
    }

    // Swap - reference count is unchanged on each pointed-to object.
    void swap(rcptr_type& other) {
        std::swap(this->value, other.value);
    }

    // Current reference count.  NOTE: dereferences 'value' with no null
    // check - only valid on a non-empty pointer.
    int refCount() const {
        return value->getRCValue()._rc_refcount.load();
    }

    // safe for the lifetime of this instance
    Pointer get() const {
        return value;
    }

    rcptr_type& operator=(const rcptr_type& other) {
        reset(other);
        return *this;
    }

    // Move-assignment - reference count is unchanged of incoming item.
    rcptr_type& operator=(rcptr_type&& other) {
        swap(other.value);
        other.value = nullptr;
        return *this;
    }

    // NOTE: no null checks on dereference, as with a raw pointer.
    T &operator *() const {
        return *value;
    }

    Pointer operator ->() const {
        return value;
    }

    bool operator! () const {
        return value == nullptr;
    }

    operator bool () const {
        return value != nullptr;
    }

private:
    // Needed so converting constructors can reach other.gimme()/value
    // across different template instantiations.
    template <typename Y, typename P, typename D>
    friend class SingleThreadedRCPtr;

    // Take an additional reference on the pointee (if any) and return it.
    Pointer gimme() const {
        if (value != nullptr) {
            value->getRCValue()._rc_incref();
        }
        return value;
    }

    // Install 'newValue' (whose reference the caller has already taken)
    // and release the reference held on the old pointee.
    void swap(Pointer newValue) {
        Pointer old = value;
        value = newValue;
        if (old != nullptr && old->getRCValue()._rc_decref() == 0) {
            Deleter()(old);
        }
    }

    Pointer value;
};
368 
369 template <typename T, typename Pointer, typename Deleter, class... Args>
make_STRCPtr(Args&&.... args)370 SingleThreadedRCPtr<T, Pointer, Deleter> make_STRCPtr(Args&&... args) {
371     return SingleThreadedRCPtr<T, Pointer, Deleter>(
372             new T(std::forward<Args>(args)...));
373 }
374 
375 // Makes SingleThreadedRCPtr support Swappable
376 template <typename T, typename Pointer, typename Deleter>
swap(SingleThreadedRCPtr<T, Pointer, Deleter>& a, SingleThreadedRCPtr<T, Pointer, Deleter>& b)377 void swap(SingleThreadedRCPtr<T, Pointer, Deleter>& a,
378           SingleThreadedRCPtr<T, Pointer, Deleter>& b) {
379     a.swap(b);
380 }
381 
/**
 * Debugging wrapper around std::atomic which print all accesses to the atomic
 * value to stderr.
 *
 * All operations take stderr_mutex so log lines from concurrent threads
 * are not interleaved.  Mimics the std::atomic<T> interface for the
 * operations it wraps.
 */
template <typename T>
class LoggedAtomic {
public:
    LoggedAtomic(T initial)
        : value(initial) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        std::cerr << "LoggedAtomic[" << this << "]::LoggedAtomic: "
                  << value.load() << std::endl;

    }

    T operator=(T desired) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        value.store(desired);
        std::cerr << "LoggedAtomic[" << this << "]::operator=: "
                  << value.load() << std::endl;
        return value.load();
    }

    T load() const {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        auto result = value.load();
        std::cerr << "LoggedAtomic[" << this << "]::load: " << result
                  << std::endl;
        return result;
    }

    void store(T desired) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        value.store(desired);
        std::cerr << "LoggedAtomic[" << this << "]::store: " << value.load()
                  << std::endl;
    }

    operator T() const {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        auto result = value.load();
        std::cerr << "LoggedAtomic[" << this << "]::operator T: " << result
                  << std::endl;
        return result;
    }

    // Returns the value held immediately before 'desired' was stored.
    T exchange(T desired, std::memory_order order = std::memory_order_seq_cst) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        std::cerr << "LoggedAtomic[" << this << "]::exchange("
                  << "desired:" << desired << ") = ";
        auto result = value.exchange(desired, order);
        std::cerr << result << std::endl;
        return result;
    }

    bool compare_exchange_strong(T& expected, T desired,
                                 std::memory_order order =
                                      std::memory_order_seq_cst ) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        // Log both inputs before the attempt - 'expected' may be
        // rewritten by the CAS on failure.  (Fixed: the original message
        // printed 'desired' after a stray ") = ".)
        std::cerr << "LoggedAtomic[" << this << "]::compare_exchange_strong("
                  << "expected:" << expected << ", desired:" << desired
                  << ") = ";
        auto result = value.compare_exchange_strong(expected, desired, order);
        std::cerr << result << std::endl;
        return result;
    }

    // Returns the value held immediately *before* the addition, matching
    // std::atomic<T>::fetch_add.  (Fixed: previously returned
    // value.load(), i.e. the post-add value.)
    T fetch_add(T arg,
                std::memory_order order = std::memory_order_seq_cst ) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        T result = value.fetch_add(arg, order);
        std::cerr << "LoggedAtomic[" << this << "]::fetch_add(" << arg
                  << "): " << result << std::endl;
        return result;
    }

    // Returns the value held immediately *before* the subtraction,
    // matching std::atomic<T>::fetch_sub.  (Fixed: previously returned
    // value.load(), i.e. the post-subtract value.)
    T fetch_sub(T arg,
                std::memory_order order = std::memory_order_seq_cst ) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        T result = value.fetch_sub(arg, order);
        std::cerr << "LoggedAtomic[" << this << "]::fetch_sub(" << arg
                  << "): " << result << std::endl;
        return result;
    }

    // Pre-increment: returns the new (incremented) value.
    T operator++() {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        ++value;
        std::cerr << "LoggedAtomic[" << this << "]::pre-increment: "
                  << value << std::endl;
        return value;
    }

    // Pre-decrement: returns the new (decremented) value.
    T operator--() {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        --value;
        std::cerr << "LoggedAtomic[" << this << "]::pre-decrement: " << value
                  << std::endl;
        return value;
    }

protected:
    // mutable so const observers (load, operator T) can still lock.
    mutable std::mutex stderr_mutex;
    std::atomic<T> value;
};
486 
487 #endif  // SRC_ATOMIC_H_
488