1 /* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /*
3  *     Copyright 2010 Couchbase, Inc
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License");
6  *   you may not use this file except in compliance with the License.
7  *   You may obtain a copy of the License at
8  *
9  *       http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS,
13  *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  *   See the License for the specific language governing permissions and
15  *   limitations under the License.
16  */
17 #pragma once
18 
19 #include "locks.h"
20 #include "utility.h"
21 #include <atomic>
22 #include <iostream>
23 #include <memory>
24 
/**
 * Atomically raise `obj` to `newValue` iff `newValue` is strictly greater
 * than the current value. No-op (and no store) otherwise.
 *
 * Note: on CAS failure compare_exchange_strong() writes the freshly
 * observed value back into `oldValue`, so no explicit re-load is needed
 * before re-testing the loop condition (the original re-load was redundant).
 */
template <typename T>
void atomic_setIfBigger(std::atomic<T> &obj, const T &newValue) {
    T oldValue = obj.load();
    while (newValue > oldValue) {
        if (obj.compare_exchange_strong(oldValue, newValue)) {
            break;
        }
        // oldValue now holds the current value of obj; loop re-checks it.
    }
}
35 
/**
 * Atomically lower `obj` to `newValue` iff `newValue` is strictly less
 * than the current value. No-op (and no store) otherwise.
 *
 * Note: on CAS failure compare_exchange_strong() writes the freshly
 * observed value back into `oldValue`, so no explicit re-load is needed
 * before re-testing the loop condition (the original re-load was redundant).
 */
template <typename T>
void atomic_setIfLess(std::atomic<T> &obj, const T &newValue) {
    T oldValue = obj.load();
    while (newValue < oldValue) {
        if (obj.compare_exchange_strong(oldValue, newValue)) {
            break;
        }
        // oldValue now holds the current value of obj; loop re-checks it.
    }
}
46 
/**
 * Atomically replace the value of `obj` with `newValue` unless the current
 * value equals `badValue`.
 *
 * @return the value observed immediately before the (attempted) swap:
 *         either `badValue` (no swap happened) or the value that was
 *         successfully replaced by `newValue`.
 *
 * Simplified from the original load-per-iteration loop: on CAS failure
 * compare_exchange_strong() refreshes `oldValue` with the current value,
 * so the loop can re-test against `badValue` without an extra load().
 */
template <typename T>
T atomic_swapIfNot(std::atomic<T> &obj, const T &badValue, const T &newValue) {
    T oldValue = obj.load();
    while (oldValue != badValue) {
        if (obj.compare_exchange_strong(oldValue, newValue)) {
            break;
        }
    }
    return oldValue;
}
62 
/**
 * Atomic pointer.
 *
 * This does *not* make the item that's pointed to atomic.
 *
 * Note: operator->/operator* act on a snapshot of the pointer; the pointee
 * may be replaced by another thread after the load returns.
 */
template <typename T>
class AtomicPtr : public std::atomic<T*> {
public:
    // nullptr rather than NULL for consistency with the rest of this file.
    AtomicPtr(T *initial = nullptr) : std::atomic<T*>(initial) {}

    ~AtomicPtr() {}

    T *operator ->() const noexcept {
        return std::atomic<T*>::load();
    }

    T &operator *() const noexcept {
        return *std::atomic<T*>::load();
    }

    operator bool() const {
        return std::atomic<T*>::load() != nullptr;
    }

    bool operator !() const {
        return std::atomic<T*>::load() == nullptr;
    }
};
91 
/**
 * A lighter-weight, smaller lock than a mutex.
 *
 * This is primarily useful when contention is rare.
 *
 * Satisfies the BasicLockable requirement (lock()/unlock()), so it can be
 * used with std::lock_guard — see RCPtr below, which does exactly that.
 */
class SpinLock {
public:
    // It seems like inlining the code caused the dtrace probe to
    // be optimized away ;)
    // NOTE(review): bodies are defined out-of-line (presumably in the
    // companion .cc) — behavior notes here are inferred from the interface.
    SpinLock();
    ~SpinLock();

    void lock(void);
    void unlock(void);

private:
    // Single acquisition attempt; presumably returns true if the flag was
    // clear and has now been set — confirm against the .cc implementation.
    bool tryAcquire(void);

    // std::atomic_flag is the one guaranteed lock-free atomic type,
    // keeping this lock as small and cheap as possible.
    std::atomic_flag lck;
    DISALLOW_COPY_AND_ASSIGN(SpinLock);
};
113 
template <class T> class RCPtr;
template <class S, class Pointer, class Deleter>
class SingleThreadedRCPtr;

/**
 * A reference counted value (used by RCPtr and SingleThreadedRCPtr).
 *
 * Intended to be inherited by classes managed via the smart pointers
 * below; the count itself is private and only reachable through the
 * friend declarations.
 */
class RCValue {
public:
    // Count starts at 0; the owning smart pointer performs the first incref.
    RCValue() : _rc_refcount(0) {}
    // A copied value starts with its own fresh count of 0 — the refcount
    // belongs to the object instance, not to the copied state.
    RCValue(const RCValue &) : _rc_refcount(0) {}
    ~RCValue() {}

private:
    template <class MyTT> friend class RCPtr;
    template <class MySS, class Pointer, class Deleter>
    friend class SingleThreadedRCPtr;
    // Returns the post-increment count.
    int _rc_incref() const {
        return ++_rc_refcount;
    }

    // Returns the post-decrement count; 0 tells the caller it held the
    // last reference and should destroy the object.
    int _rc_decref() const {
        return --_rc_refcount;
    }

    // A get method to ensure that SingleThreadedRCPtr does not need to directly
    // refer to a RCValue.
    const RCValue& getRCValue() const {
        return *this;
    }

    // mutable + const members above: refcounting must work on const objects.
    // std::atomic makes incref/decref safe for the concurrent RCPtr.
    mutable std::atomic<int> _rc_refcount;
};
147 
/**
 * Concurrent reference counted pointer.
 *
 * C must derive from RCValue (all refcount access goes through
 * getRCValue()._rc_incref/_rc_decref). Copy/reset are made safe against
 * concurrent reset() via the internal SpinLock; see gimme()/swap().
 */
template <class C>
class RCPtr {
public:
    RCPtr(C *init = NULL) : value(init) {
        if (init != NULL) {
            value->getRCValue()._rc_incref();
        }
    }

    // Copy: gimme() takes the lock and increfs, so the (read pointer,
    // incref) pair is atomic with respect to a concurrent reset().
    RCPtr(const RCPtr<C> &other) : value(other.gimme()) {}

    // Move: steals the pointer without touching the refcount.
    // NOTE(review): no lock is taken here — moving from an RCPtr that
    // another thread may concurrently reset() is not safe.
    RCPtr(RCPtr<C>&& other) {
        value.store(other.value.load());
        other.value.store(nullptr);
    }

    ~RCPtr() {
        if (value && value->getRCValue()._rc_decref() == 0) {
            delete get();
        }
    }

    // Incref the new value first, then swap in; swap() drops our
    // reference to the old value.
    void reset(C *newValue = NULL) {
        if (newValue != NULL) {
            newValue->getRCValue()._rc_incref();
        }
        swap(newValue);
    }

    // gimme() already increfs on our behalf; swap() releases the old one.
    void reset(const RCPtr<C> &other) {
        swap(other.gimme());
    }

    // safe for the lifetime of this instance
    C *get() const {
        return value;
    }

    RCPtr<C> & operator =(const RCPtr<C> &other) {
        reset(other);
        return *this;
    }

    C &operator *() const {
        return *value;
    }

    C *operator ->() const {
        return value;
    }

    bool operator! () const {
        return !value;
    }

    operator bool () const {
        return (bool)value;
    }

private:
    // Read the pointer and incref it as one atomic step (under the lock),
    // so the object cannot be released between the read and the incref.
    C *gimme() const {
        std::lock_guard<SpinLock> lh(lock);
        if (value) {
            value->getRCValue()._rc_incref();
        }
        return value;
    }

    // Exchange the pointer under the lock, but decref/delete the old
    // value only after releasing it — destruction must not run while
    // holding the spinlock.
    void swap(C *newValue) {
        C* tmp;
        {
            std::lock_guard<SpinLock> lh(lock);
            tmp = value.exchange(newValue);
        }
        if (tmp != NULL && tmp->getRCValue()._rc_decref() == 0) {
            delete tmp;
        }
    }

    AtomicPtr<C> value;
    mutable SpinLock lock; // exists solely for the purpose of implementing reset() safely
};
233 
234 /**
235  * Dynamic cast for RCPtr. Modelled on method of the same name for
236  * std::shared_ptr.
237  */
238 template <class T, class U>
dynamic_pointer_cast(const RCPtr<U>& r)239 RCPtr<T> dynamic_pointer_cast(const RCPtr<U>& r) {
240     T* p = dynamic_cast<T*>(r.get());
241     return p ? RCPtr<T>(p) : RCPtr<T>();
242 }
243 
/**
 * Single-threaded reference counted pointer.
 * "Single-threaded" means that the reference counted pointer should be accessed
 * by only one thread at any time or accesses to the reference counted pointer
 * by multiple threads should be synchronized by the external lock.
 *
 * Takes the following template parameters:
 * @tparam class T the class that the SingleThreadedRCPtr is pointing to.
 * @tparam class Pointer the pointer type that the SingleThreadedRCPtr
 * maintains a reference counter for.  It is defaulted to a T*, however can also
 * be a TaggedPtr<T>.
 * @tparam class Deleter the deleter function for deleting the object pointed to
 * by SingleThreadedRCPtr.  It defaults to the std::default_delete function
 * templated on class T.  However when the pointer type is a TaggedPtr<T> a
 * specialised delete function must be provided.
 */
template <class T, class Pointer = T*, class Deleter = std::default_delete<T>>
class SingleThreadedRCPtr {
public:
    using rcptr_type = SingleThreadedRCPtr<T, Pointer, Deleter>;

    SingleThreadedRCPtr(Pointer init = nullptr) : value(init) {
        if (init != nullptr) {
            value->getRCValue()._rc_incref();
        }
    }

    // Copy construction - increases ref-count on object by 1.
    SingleThreadedRCPtr(const rcptr_type& other) : value(other.gimme()) {
    }

    // Move construction - reference count is unchanged.
    SingleThreadedRCPtr(rcptr_type&& other) : value(other.value) {
        other.value = nullptr;
    }

    // Converting copy from a compatible pointee/pointer type
    // (e.g. derived-to-base); increfs like the plain copy ctor.
    template <typename Y, typename P>
    SingleThreadedRCPtr(const SingleThreadedRCPtr<Y, P, Deleter>& other)
        : value(other.gimme()) {
    }

    // Takes over sole ownership from a unique_ptr (refcount becomes 1).
    SingleThreadedRCPtr(std::unique_ptr<T>&& other)
        : SingleThreadedRCPtr(other.release()) {
    }

    ~SingleThreadedRCPtr() {
        if (value != nullptr && value->getRCValue()._rc_decref() == 0) {
            Deleter()(value);
        }
    }

    // Incref the incoming value first, then let swap() release ours.
    void reset(Pointer newValue = nullptr) {
        if (newValue != nullptr) {
            newValue->getRCValue()._rc_incref();
        }
        swap(newValue);
    }

    // gimme() already increfs; swap() drops our old reference.
    void reset(const rcptr_type& other) {
        swap(other.gimme());
    }

    // Swap - reference count is unchanged on each pointed-to object.
    void swap(rcptr_type& other) {
        std::swap(this->value, other.value);
    }

    // Precondition: non-null pointer — dereferences `value` unchecked.
    int refCount() const {
        return value->getRCValue()._rc_refcount.load();
    }

    // safe for the lifetime of this instance
    Pointer get() const {
        return value;
    }

    /**
     * Returns a reference to the owned pointer.
     *
     * WARNING WARNING WARNING
     *
     * This function is inheritly unsafe; as it exposes the internal
     * managed pointer. Incorrect use of this could lead to memory
     * leaks, crashes etc.  Unless you really know what you're doing
     * don't use this!
     */
    Pointer& unsafeGetPointer() {
        return value;
    }

    rcptr_type& operator=(const rcptr_type& other) {
        reset(other);
        return *this;
    }

    // Move-assignment - reference count is unchanged of incoming item.
    // Calls the private swap(Pointer) overload: our old reference is
    // released, other's reference transfers to us without an incref.
    rcptr_type& operator=(rcptr_type&& other) {
        swap(other.value);
        other.value = nullptr;
        return *this;
    }

    T &operator *() const {
        return *value;
    }

    Pointer operator ->() const {
        return value;
    }

    bool operator! () const {
        return value == nullptr;
    }

    operator bool () const {
        return value != nullptr;
    }

private:
    template <typename Y, typename P, typename D>
    friend class SingleThreadedRCPtr;

    // Incref (if non-null) and return the pointer. Unlike RCPtr::gimme()
    // there is no lock: thread-safety is the caller's responsibility.
    Pointer gimme() const {
        if (value != nullptr) {
            value->getRCValue()._rc_incref();
        }
        return value;
    }

    // Take ownership of newValue's (already counted) reference and
    // release our old one, deleting the pointee on last reference.
    void swap(Pointer newValue) {
        Pointer old = value;
        value = newValue;
        if (old != nullptr && old->getRCValue()._rc_decref() == 0) {
            Deleter()(old);
        }
    }

    Pointer value;
};
383 
384 template <typename T, typename Pointer, typename Deleter, class... Args>
make_STRCPtr(Args&&.... args)385 SingleThreadedRCPtr<T, Pointer, Deleter> make_STRCPtr(Args&&... args) {
386     return SingleThreadedRCPtr<T, Pointer, Deleter>(
387             new T(std::forward<Args>(args)...));
388 }
389 
390 // Makes SingleThreadedRCPtr support Swappable
391 template <typename T, typename Pointer, typename Deleter>
swap(SingleThreadedRCPtr<T, Pointer, Deleter>& a, SingleThreadedRCPtr<T, Pointer, Deleter>& b)392 void swap(SingleThreadedRCPtr<T, Pointer, Deleter>& a,
393           SingleThreadedRCPtr<T, Pointer, Deleter>& b) {
394     a.swap(b);
395 }
396 
/**
 * Debugging wrapper around std::atomic which print all accesses to the atomic
 * value to stderr.
 *
 * Mirrors the std::atomic interface, so it can be dropped in as a
 * diagnostic replacement; all logging is serialized via stderr_mutex.
 */
template <typename T>
class LoggedAtomic {
public:
    LoggedAtomic(T initial)
        : value(initial) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        std::cerr << "LoggedAtomic[" << this << "]::LoggedAtomic: "
                  << value.load() << std::endl;

    }

    T operator=(T desired) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        value.store(desired);
        std::cerr << "LoggedAtomic[" << this << "]::operator=: "
                  << value.load() << std::endl;
        return value.load();
    }

    T load() const {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        auto result = value.load();
        std::cerr << "LoggedAtomic[" << this << "]::load: " << result
                  << std::endl;
        return result;
    }

    void store(T desired) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        value.store(desired);
        std::cerr << "LoggedAtomic[" << this << "]::store: " << value.load()
                  << std::endl;
    }

    operator T() const {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        auto result = value.load();
        std::cerr << "LoggedAtomic[" << this << "]::operator T: " << result
                  << std::endl;
        return result;
    }

    /// Returns the value held before the exchange (std::atomic semantics).
    T exchange(T desired, std::memory_order order = std::memory_order_seq_cst) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        std::cerr << "LoggedAtomic[" << this << "]::exchange("
                  << "desired:" << desired << ") = ";
        auto result = value.exchange(desired, order);
        std::cerr << result << std::endl;
        return result;
    }

    bool compare_exchange_strong(T& expected, T desired,
                                 std::memory_order order =
                                      std::memory_order_seq_cst ) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        // Fixed garbled log format: previously printed
        // "desired:) = <desired>" with the paren before the value.
        std::cerr << "LoggedAtomic[" << this << "]::compare_exchange_strong("
                  << "expected:" << expected << ", desired:" << desired
                  << ") = ";
        auto result = value.compare_exchange_strong(expected, desired, order);
        std::cerr << result << std::endl;
        return result;
    }

    /// Returns the value held *before* the addition, matching
    /// std::atomic::fetch_add (previously returned the new value).
    T fetch_add(T arg,
                std::memory_order order = std::memory_order_seq_cst ) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        T result = value.fetch_add(arg, order);
        std::cerr << "LoggedAtomic[" << this << "]::fetch_add(" << arg
                  << "): " << result << std::endl;
        return result;
    }

    /// Returns the value held *before* the subtraction, matching
    /// std::atomic::fetch_sub (previously returned the new value).
    T fetch_sub(T arg,
                std::memory_order order = std::memory_order_seq_cst ) {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        T result = value.fetch_sub(arg, order);
        std::cerr << "LoggedAtomic[" << this << "]::fetch_sub(" << arg
                  << "): " << result << std::endl;
        return result;
    }

    /// Pre-increment: returns the new value.
    T operator++() {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        ++value;
        std::cerr << "LoggedAtomic[" << this << "]::pre-increment: "
                  << value << std::endl;
        return value;
    }

    /// Pre-decrement: returns the new value.
    T operator--() {
        std::lock_guard<std::mutex> lock(stderr_mutex);
        --value;
        std::cerr << "LoggedAtomic[" << this << "]::pre-decrement: " << value
                  << std::endl;
        return value;
    }

protected:
    // mutable so const accessors (load, operator T) can still lock.
    mutable std::mutex stderr_mutex;
    std::atomic<T> value;
};
501