xref: /6.6.0/phosphor/src/trace_buffer.cc (revision 6c8a36a2)
1/* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2/*
3 *     Copyright 2016 Couchbase, Inc
4 *
5 *   Licensed under the Apache License, Version 2.0 (the "License");
6 *   you may not use this file except in compliance with the License.
7 *   You may obtain a copy of the License at
8 *
9 *       http://www.apache.org/licenses/LICENSE-2.0
10 *
11 *   Unless required by applicable law or agreed to in writing, software
12 *   distributed under the License is distributed on an "AS IS" BASIS,
13 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 *   See the License for the specific language governing permissions and
15 *   limitations under the License.
16 */
17
18#include <mutex>
19#include <stdexcept>
20
21#include <dvyukov/mpmc_bounded_queue.h>
22#include <gsl_p/dyn_array.h>
23
24#include <phosphor/platform/thread.h>
25#include <phosphor/stats_callback.h>
26#include <phosphor/trace_buffer.h>
27#include "utils/memory.h"
28
29namespace phosphor {
30
31    /*
32     * TraceChunk implementation
33     */
34
35    void TraceChunk::reset(uint32_t _thread_id) {
36        next_free = 0;
37        thread_id = _thread_id;
38    }
39
40    bool TraceChunk::isFull() const {
41        return next_free == chunk.max_size();
42    }
43
    // Returns the number of events currently stored in this chunk
    // (the next free slot index doubles as the populated count).
    size_t TraceChunk::count() const {
        return next_free;
    }
47
48    TraceEvent& TraceChunk::addEvent() {
49        if (isFull()) {
50            throw std::out_of_range(
51                "phosphor::TraceChunk::addEvent: "
52                "All events in chunk have been used");
53        }
54        return chunk[next_free++];
55    }
56
    // Unchecked access to the event at `index`; callers are responsible
    // for staying within [0, count()).
    const TraceEvent& TraceChunk::operator[](const int index) const {
        return chunk[index];
    }
60
    // Returns the id of the thread this chunk was last reset() for.
    uint32_t TraceChunk::threadID() const {
        return thread_id;
    }
64
    // Iterator to the first (oldest) event stored in this chunk.
    TraceChunk::const_iterator TraceChunk::begin() const {
        return chunk.begin();
    }
68
69    TraceChunk::const_iterator TraceChunk::end() const {
70        return chunk.begin() + count();
71    }
72
73    /*
74     * TraceBufferChunkIterator implementation
75     */
    // Construct an iterator over `buffer_`, positioned at chunk `index_`.
    TraceBuffer::chunk_iterator::chunk_iterator(const TraceBuffer& buffer_,
                                                size_t index_)
        : buffer(buffer_), index(index_) {}

    // Construct a begin iterator (chunk index 0) over `buffer_`.
    TraceBuffer::chunk_iterator::chunk_iterator(const TraceBuffer& buffer_)
        : chunk_iterator(buffer_, 0) {}

    // Dereference to the chunk at the current index.
    const TraceChunk& TraceBuffer::chunk_iterator::operator*() const {
        return buffer[index];
    }
    const TraceChunk* TraceBuffer::chunk_iterator::operator->() const {
        return &(buffer[index]);
    }
    // Pre-increment: advance to the next chunk in the buffer.
    TraceBuffer::chunk_iterator& TraceBuffer::chunk_iterator::operator++() {
        ++index;
        return *this;
    }
    // Iterators are equal only if they refer to the same buffer object
    // (compared by address, not by value) at the same chunk index.
    bool TraceBuffer::chunk_iterator::operator==(
        const TraceBuffer::chunk_iterator& other) const {
        return &buffer == &(other.buffer) && index == other.index;
    }
    bool TraceBuffer::chunk_iterator::operator!=(
        const TraceBuffer::chunk_iterator& other) const {
        return !(*this == other);
    }
101
    /**
     * TraceBuffer implementation that stores events in a fixed-size
     * array of TraceChunks; once every chunk has been issued the buffer
     * is full and no further chunks are handed out.
     */
106    class FixedTraceBuffer : public TraceBuffer {
107    public:
108        FixedTraceBuffer(size_t generation_, size_t buffer_size_)
109            : buffer(buffer_size_),
110              issued(0),
111              on_loan(0),
112              generation(generation_) {
113        }
114
115        ~FixedTraceBuffer() override = default;
116
117        TraceChunk* getChunk() override {
118            size_t offset = issued++;
119            if (offset >= buffer.size()) {
120                return nullptr;
121            }
122            TraceChunk& chunk = buffer[offset];
123            chunk.reset(platform::getCurrentThreadIDCached());
124            ++on_loan;
125            return &chunk;
126        }
127
128        void returnChunk(TraceChunk& chunk) override {
129            --on_loan;
130            (void)chunk;
131        }
132
133        bool isFull() const override {
134            return issued >= buffer.size();
135        }
136
137        void getStats(StatsCallback& addStats) const override {
138            using gsl_p::make_span;
139
140            addStats("buffer_name", make_span("FixedTraceBuffer"));
141            addStats("buffer_is_full", isFull());
142            auto count = chunk_count();
143            addStats("buffer_chunk_count", count);
144            addStats("buffer_total_loaned", count);
145            addStats("buffer_loaned_chunks", on_loan);
146            addStats("buffer_size", buffer.size());
147            addStats("buffer_generation", generation);
148        }
149
150        size_t getGeneration() const override {
151            return generation;
152        }
153
154        BufferMode bufferMode() const override {
155            return BufferMode::fixed;
156        }
157
158        const TraceChunk& operator[](const int index) const override {
159            return buffer[index];
160        }
161
162        size_t chunk_count() const override {
163            size_t tmp{issued};
164            return (buffer.size() > tmp) ? tmp : buffer.size();
165        }
166
167        chunk_iterator chunk_begin() const override {
168            return chunk_iterator(*this);
169        }
170
171        chunk_iterator chunk_end() const override {
172            return chunk_iterator(*this, chunk_count());
173        }
174
175        event_iterator begin() const override {
176            return event_iterator(chunk_begin(), chunk_end());
177        }
178
179        event_iterator end() const override {
180            return event_iterator(chunk_end(), chunk_end());
181        }
182
183    protected:
184        gsl_p::dyn_array<TraceChunk> buffer;
185        // This is the total number of chunks loaned out
186        std::atomic<size_t> issued;
187        // This is the number of chunks currently loaned out
188        RelaxedAtomic<size_t> on_loan;
189        size_t generation;
190    };
191
    // Factory: create a FixedTraceBuffer, exposed only through the
    // abstract TraceBuffer interface.
    std::unique_ptr<TraceBuffer> make_fixed_buffer(size_t generation,
                                                   size_t buffer_size) {
        return utils::make_unique<FixedTraceBuffer>(generation, buffer_size);
    }
196
    /**
     * TraceBuffer implementation that stores events in a fixed-size
     * ring of TraceChunks; once all chunks have been issued, returned
     * chunks are recycled, overwriting their previous events.
     */
201    class RingTraceBuffer : public TraceBuffer {
202    public:
203        RingTraceBuffer(size_t generation_, size_t buffer_size_)
204            : actual_count(0),
205              on_loan(0),
206              buffer(buffer_size_),
207              return_queue(upper_power_of_two(buffer_size_)),
208              generation(generation_) {}
209
210        ~RingTraceBuffer() override = default;
211
212        TraceChunk* getChunk() override {
213            TraceChunk* chunk = reinterpret_cast<TraceChunk*>(0xDEADB33F);
214
215            auto offset = actual_count++;
216
217            // Once we've handed out more chunks than the buffer size, start
218            // pulling chunks from the queue
219            if (offset >= buffer.size()) {
220                while (!return_queue.dequeue(chunk)) {
221                }
222            } else {
223                chunk = &buffer[offset];
224            }
225
226            chunk->reset(platform::getCurrentThreadIDCached());
227            ++on_loan;
228            return chunk;
229        }
230
231        void returnChunk(TraceChunk& chunk) override {
232            while (!return_queue.enqueue(&chunk))
233                ;
234            --on_loan;
235        }
236
237        bool isFull() const override {
238            return false;
239        }
240
241        void getStats(StatsCallback& addStats) const override {
242            using gsl_p::make_span;
243
244            addStats("buffer_name", make_span("RingTraceBuffer"));
245            addStats("buffer_is_full", isFull());
246            addStats("buffer_chunk_count",
247                     std::min(actual_count.load(std::memory_order_relaxed),
248                              buffer.size()));
249            addStats("buffer_total_loaned", actual_count);
250            addStats("buffer_loaned_chunks", on_loan);
251            addStats("buffer_size", buffer.size());
252            addStats("buffer_generation", generation);
253        }
254
255        size_t getGeneration() const override {
256            return generation;
257        }
258
259        BufferMode bufferMode() const override {
260            return BufferMode::ring;
261        }
262
263        const TraceChunk& operator[](const int index) const override {
264            return buffer[index];
265        }
266
267        size_t chunk_count() const override {
268            // If the chunks given out is greater than the buffer size
269            // then return the buffer size instead.
270            if (actual_count > buffer.size()) {
271                return buffer.size();
272            } else {
273                return actual_count;
274            }
275        }
276
277        chunk_iterator chunk_begin() const override {
278            return chunk_iterator(*this);
279        }
280
281        chunk_iterator chunk_end() const override {
282            return chunk_iterator(*this, chunk_count());
283        }
284
285        event_iterator begin() const override {
286            return event_iterator(chunk_begin(), chunk_end());
287        }
288
289        event_iterator end() const override {
290            return event_iterator(chunk_end(), chunk_end());
291        }
292
293    protected:
294        // This is the total number of chunks ever handed out
295        std::atomic<size_t> actual_count;
296        // This is the number of chunks currently loaned out
297        RelaxedAtomic<size_t> on_loan;
298        gsl_p::dyn_array<TraceChunk> buffer;
299        dvyukov::mpmc_bounded_queue<TraceChunk*> return_queue;
300        size_t generation;
301
302    private:
303        template <typename T>
304        T upper_power_of_two(T v) {
305            v--;
306            v |= v >> 1;
307            v |= v >> 2;
308            v |= v >> 4;
309            v |= v >> 8;
310            v |= v >> 16;
311            v++;
312
313            if (v == 1) {
314                v = 2;
315            }
316
317            return v;
318        }
319    };
320
    // Factory: create a RingTraceBuffer, exposed only through the
    // abstract TraceBuffer interface.
    std::unique_ptr<TraceBuffer> make_ring_buffer(size_t generation,
                                                  size_t buffer_size) {
        return utils::make_unique<RingTraceBuffer>(generation, buffer_size);
    }
325}
326