/* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 *     Copyright 2016 Couchbase, Inc
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */

#include <cstring>
#include <exception>
#include <string>

#include "phosphor/platform/thread.h"
#include "phosphor/stats_callback.h"
#include "phosphor/tools/export.h"
#include "phosphor/trace_log.h"
#include "utils/memory.h"
#include "utils/string_utils.h"

namespace phosphor {

    /*
     * TraceLog implementation
     */

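    /*
     * A minimal usage sketch of the API implemented below (illustrative
     * only; it assumes TraceConfig's (BufferMode, size_t) constructor
     * declared in the public phosphor headers):
     *
     *     phosphor::TraceLog& log = phosphor::TraceLog::getInstance();
     *     log.registerThread("my-thread"); // enables low-contention logging
     *     log.start(phosphor::TraceConfig(
     *             phosphor::BufferMode::fixed, 5 * 1024 * 1024));
     *     // ... emit events (normally via phosphor's TRACE_* macros) ...
     *     log.stop();
     *     auto buffer = log.getBuffer(); // takes ownership of the trace
     *     log.deregisterThread();
     */
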
    /**
     * The thread-specific ChunkTenant used for low-contention logging.
     *
     * This ChunkTenant is only usable once the current thread has been
     * registered, since it depends on resources that are allocated and
     * reachable only through thread-local storage.
     */
    THREAD_LOCAL ChunkTenant thread_chunk;

    TraceLog::TraceLog(const TraceLogConfig& _config)
        : enabled(false), generation(0) {
        configure(_config);
    }

    TraceLog::TraceLog() : TraceLog(TraceLogConfig().fromEnvironment()) {}

    TraceLog::~TraceLog() {
        stop(true);
    }

    void TraceLog::configure(const TraceLogConfig& _config) {
        std::lock_guard<TraceLog> lh(*this);

        if (auto* startup_trace = _config.getStartupTrace()) {
            start(lh, *startup_trace);
        }
    }

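    // Meyers singleton: function-local static initialisation is
    // thread-safe in C++11, so no explicit locking is required here.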
    TraceLog& TraceLog::getInstance() {
        static TraceLog log_instance;
        return log_instance;
    }

    void TraceLog::stop(bool shutdown) {
        std::lock_guard<TraceLog> lh(*this);
        stop(lh, shutdown);
    }

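    /**
     * Stops tracing only if the trace run identified by _generation is
     * still the active one. Used by threads that observe buffer
     * exhaustion and want to trigger the stop without racing a restart.
     */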
    void TraceLog::maybe_stop(size_t _generation) {
        // If we can't acquire the lock then don't bother waiting around,
        // as it probably means another thread is in the middle of
        // shutting down.
        if (mutex.try_lock()) {
            std::lock_guard<TraceLog> lh(*this, std::adopt_lock);

            // If the generation has changed since we started waiting for
            // the lock then tracing has already been stopped (and perhaps
            // restarted), so there is nothing left for us to do.
            if (generation != _generation) {
                return;
            }

            stop(lh);
        }
    }

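    /**
     * enabled.exchange(false) ensures only the first caller performs the
     * teardown (disabling categories, evicting per-thread chunks, firing
     * the stopped callback). During destruction the stopped callback is
     * skipped unless the config asked for tracing to be stopped on
     * destruct.
     */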
    void TraceLog::stop(std::lock_guard<TraceLog>& lh, bool shutdown) {
        if (enabled.exchange(false)) {
            registry.disableAll();
            evictThreads(lh);
            auto* cb = trace_config.getStoppedCallback();
            if ((cb != nullptr) &&
                (!shutdown || trace_config.getStopTracingOnDestruct())) {
                (*cb)(*this, lh);
            }
        }
    }

    void TraceLog::start(const TraceConfig& _trace_config) {
        std::lock_guard<TraceLog> lh(*this);
        start(lh, _trace_config);
    }

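    /**
     * (Re)starts tracing: converts the configured byte size into whole
     * chunks, stops any run already in progress, then allocates a fresh
     * buffer under a new generation before re-enabling logging.
     */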
    void TraceLog::start(std::lock_guard<TraceLog>& lh,
                         const TraceConfig& _trace_config) {
        trace_config = _trace_config;

        size_t buffer_size = trace_config.getBufferSize() / sizeof(TraceChunk);
        if (buffer_size == 0) {
            throw std::invalid_argument(
                "Cannot specify a buffer size less than a single chunk (" +
                std::to_string(sizeof(TraceChunk)) + " bytes)");
        }

        if (enabled) {
            stop(lh);
        }

        buffer = trace_config.getBufferFactory()(generation++, buffer_size);
        registry.updateEnabled(trace_config.getEnabledCategories(),
                               trace_config.getDisabledCategories());
        clearDeregisteredThreads();
        enabled.store(true);
    }

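    /**
     * Fast path for event logging (both overloads below): the `enabled`
     * check is unsynchronised by design, so at worst an event near a
     * start/stop boundary is dropped or recorded late. The per-thread
     * ChunkTenant keeps the common case free of global lock contention.
     */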
    void TraceLog::logEvent(const tracepoint_info* tpi,
                            TraceArgument argA,
                            TraceArgument argB) {
        if (!enabled) {
            return;
        }
        auto cl = getChunkTenant();
        if (cl) {
            cl.mutex()->chunk->addEvent() = TraceEvent(
                    tpi,
                    {{argA, argB}});
        }
    }

    void TraceLog::logEvent(const tracepoint_info* tpi,
                            std::chrono::steady_clock::time_point start,
                            std::chrono::steady_clock::duration duration,
                            TraceArgument argA,
                            TraceArgument argB) {
        if (!enabled) {
            return;
        }
        auto cl = getChunkTenant();
        if (cl) {
            cl.mutex()->chunk->addEvent() = TraceEvent(
                    tpi,
                    start,
                    duration,
                    {{argA, argB}});
        }
    }

    const AtomicCategoryStatus& TraceLog::getCategoryStatus(
        const char* category_group) {
        return registry.getStatus(category_group);
    }

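    // Note for the two accessors below: each transfers ownership of the
    // completed buffer out of the TraceLog, so a second call (before the
    // next start) yields a null buffer.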
    std::unique_ptr<TraceBuffer> TraceLog::getBuffer() {
        std::lock_guard<TraceLog> lh(*this);
        return getBuffer(lh);
    }

    std::unique_ptr<TraceBuffer> TraceLog::getBuffer(
        std::lock_guard<TraceLog>&) {
        if (enabled) {
            throw std::logic_error(
                "phosphor::TraceLog::getBuffer: Cannot get the current "
                "TraceBuffer while logging is enabled");
        }
        return std::move(buffer);
    }

    TraceContext TraceLog::getTraceContext() {
        std::lock_guard<TraceLog> lh(*this);
        return getTraceContext(lh);
    }

    TraceContext TraceLog::getTraceContext(std::lock_guard<TraceLog>&) {
        if (enabled) {
            throw std::logic_error(
                    "phosphor::TraceLog::getTraceContext: Cannot get the "
                    "TraceContext while logging is enabled");
        }
        return TraceContext(std::move(buffer), thread_names);
    }

    bool TraceLog::isEnabled() const {
        return enabled;
    }

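    /**
     * Marks the calling thread's ChunkTenant as usable and optionally
     * records a human-readable name for it in the exported trace.
     * Threads that never call this explicitly are registered lazily
     * (with no name) by replaceChunk() below.
     */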
    void TraceLog::registerThread(const std::string& thread_name) {
        std::lock_guard<TraceLog> lh(*this);

        if (thread_chunk.initialised) {
            throw std::logic_error("TraceLog::registerThread: Thread is "
                                   "already registered");
        }

        thread_chunk.initialised = true;
        registered_chunk_tenants.insert(&thread_chunk);

        if (!thread_name.empty()) {
            // Unconditionally set the name of the thread, even in the
            // unlikely event that it is already there.
            thread_names[platform::getCurrentThreadIDCached()] = thread_name;

            // Make sure we don't remove our newly registered thread if we
            // happened to reuse the TID of a thread that's been deregistered.
            deregistered_threads.erase(platform::getCurrentThreadIDCached());
        }
    }

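    /**
     * Returns any outstanding chunk to the buffer and retires this
     * thread's ChunkTenant. While a trace is still running the thread's
     * name must survive for the final export, so its removal is deferred
     * (via deregistered_threads) until the next start().
     */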
    void TraceLog::deregisterThread() {
        std::lock_guard<TraceLog> lh(*this);

        if (!thread_chunk.initialised) {
            throw std::logic_error(
                "phosphor::TraceLog::deregisterThread: This thread has "
                "not been previously registered");
        }

        if (thread_chunk.chunk) {
            if (buffer) {
                buffer->returnChunk(*thread_chunk.chunk);
            }
            thread_chunk.chunk = nullptr;
        }
        registered_chunk_tenants.erase(&thread_chunk);
        thread_chunk.initialised = false;

        if (isEnabled()) {
            deregistered_threads.emplace(platform::getCurrentThreadIDCached());
        } else {
            thread_names.erase(platform::getCurrentThreadIDCached());
        }
    }

    TraceConfig TraceLog::getTraceConfig() const {
        std::lock_guard<std::mutex> lh(mutex);
        return trace_config;
    }

    void TraceLog::getStats(StatsCallback& addStats) const {
        std::lock_guard<std::mutex> lh(mutex);

        registry.getStats(addStats);
        if (buffer) {
            buffer->getStats(addStats);
        }

        addStats("log_is_enabled", isEnabled());
        addStats("log_has_buffer", buffer != nullptr);
        addStats("log_thread_names", thread_names.size());
        addStats("log_deregistered_threads", deregistered_threads.size());
        addStats("log_registered_tenants", registered_chunk_tenants.size());
    }

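    /**
     * Acquires the calling thread's ChunkTenant via try_lock: failure
     * means evictThreads() currently holds the tenant's lock, i.e. we
     * are stopping, so the event is simply dropped. If the buffer cannot
     * supply a fresh chunk, the thread records the current generation
     * and attempts to stop that specific trace run via maybe_stop().
     */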
    std::unique_lock<ChunkTenant> TraceLog::getChunkTenant() {
        std::unique_lock<ChunkTenant> cl{thread_chunk, std::try_to_lock};

        // If we didn't acquire the lock then we're stopping, so bail out
        if (!cl) {
            return {};
        }

        if (!thread_chunk.chunk || thread_chunk.chunk->isFull()) {
            // If we're missing our chunk then it might be because we're
            // meant to be stopping right now.
            if (!enabled) {
                return {};
            }

            if (!replaceChunk(thread_chunk)) {
                size_t current = generation;
                cl.unlock();
                maybe_stop(current);
                return {};
            }
        }

        return cl;
    }

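    /**
     * Returns the tenant's spent chunk (if any) to the buffer and
     * attempts to fetch a replacement; returns false once tracing has
     * been disabled or the buffer has no chunks left to hand out.
     */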
    bool TraceLog::replaceChunk(ChunkTenant& ct) {
        // Temporary addition until ChunkTenant initialisation
        // is guaranteed by C++11 `thread_local`
        if (!ct.initialised) {
            registerThread();
        }

        if (ct.chunk) {
            buffer->returnChunk(*ct.chunk);
            ct.chunk = nullptr;
        }
        return enabled && buffer && (ct.chunk = buffer->getChunk());
    }

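    /**
     * Strips the chunk from every registered tenant. Taking each
     * tenant's master lock blocks that thread's getChunkTenant(), so no
     * chunk can be written to after the trace has stopped.
     */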
    void TraceLog::evictThreads(std::lock_guard<TraceLog>& lh) {
        for (auto* chunk_tenant : registered_chunk_tenants) {
            chunk_tenant->lck.master().lock();
            chunk_tenant->chunk = nullptr;
            chunk_tenant->lck.master().unlock();
        }
    }

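    // Called on start(): drops the recorded names of threads which
    // deregistered during the previous trace run.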
    void TraceLog::clearDeregisteredThreads() {
        for (const auto& tid : deregistered_threads) {
            thread_names.erase(tid);
        }
    }

} // namespace phosphor