1 /* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /*
3  *     Copyright 2016 Couchbase, Inc
4  *
5  *   Licensed under the Apache License, Version 2.0 (the "License");
6  *   you may not use this file except in compliance with the License.
7  *   You may obtain a copy of the License at
8  *
9  *       http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *   Unless required by applicable law or agreed to in writing, software
12  *   distributed under the License is distributed on an "AS IS" BASIS,
13  *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  *   See the License for the specific language governing permissions and
15  *   limitations under the License.
16  */
17 
18 #include <cstring>
19 #include <exception>
20 #include <string>
21 
22 #include "phosphor/platform/thread.h"
23 #include "phosphor/stats_callback.h"
24 #include "phosphor/tools/export.h"
25 #include "phosphor/trace_log.h"
26 #include "utils/memory.h"
27 #include "utils/string_utils.h"
28 
29 namespace phosphor {
30 
31     /*
32      * TraceLog implementation
33      */
34 
35     /**
36      * The thread-specific ChunkTenant used for low contention
37      *
38      * This ChunkTenant is only used when the current thread
39      * has been registered as it requires resources allocated
40      * that are only referred to from thread-local storage.
41      */
42     THREAD_LOCAL ChunkTenant thread_chunk;
43 
    /**
     * Construct a TraceLog with tracing disabled and the buffer
     * generation counter at zero, then apply the given configuration
     * (which may immediately start tracing if the config carries a
     * startup TraceConfig).
     */
    TraceLog::TraceLog(const TraceLogConfig& _config)
        : enabled(false), generation(0) {
        configure(_config);
    }
48 
    // Default constructor: delegate to the config constructor with a
    // configuration derived from the environment
    // (TraceLogConfig::fromEnvironment()).
    TraceLog::TraceLog() : TraceLog(TraceLogConfig().fromEnvironment()) {}
50 
    TraceLog::~TraceLog() {
        // Stop tracing on destruction. `shutdown = true` means the
        // stopped-callback is only invoked if the config opted in via
        // getStopTracingOnDestruct() (see stop(lh, shutdown)).
        stop(true);
    }
54 
configure(const TraceLogConfig& _config)55     void TraceLog::configure(const TraceLogConfig& _config) {
56         std::lock_guard<TraceLog> lh(*this);
57 
58         if (auto* startup_trace = _config.getStartupTrace()) {
59             start(lh, *startup_trace);
60         }
61     }
62 
    /**
     * Returns the process-wide TraceLog singleton.
     *
     * Function-local static ("Meyers singleton"): construction is
     * thread-safe under C++11 magic statics.
     */
    TraceLog& TraceLog::getInstance() {
        static TraceLog log_instance;
        return log_instance;
    }
67 
stop(bool shutdown)68     void TraceLog::stop(bool shutdown) {
69         std::lock_guard<TraceLog> lh(*this);
70         stop(lh, shutdown);
71     }
72 
maybe_stop(size_t _generation)73     void TraceLog::maybe_stop(size_t _generation) {
74         // If we can't acquire the lock then don't bother waiting around
75         // as it probably means another thread is in the middle of shutting
76         // down.
77         if (mutex.try_lock()) {
78             std::lock_guard<TraceLog> lh(*this, std::adopt_lock);
79 
80             // The generation has been updated since we started waiting for
81             // the lock and so we don't need to stop tracing anymore
82             if (generation != _generation) {
83                 return;
84             }
85 
86             stop(lh);
87         }
88     }
89 
stop(std::lock_guard<TraceLog>& lh, bool shutdown)90     void TraceLog::stop(std::lock_guard<TraceLog>& lh, bool shutdown) {
91         if (enabled.exchange(false)) {
92             registry.disableAll();
93             evictThreads(lh);
94             auto* cb = trace_config.getStoppedCallback();
95             if ((cb != nullptr) &&
96                 (!shutdown || trace_config.getStopTracingOnDestruct())) {
97                 (*cb)(*this, lh);
98             }
99         }
100     }
101 
start(const TraceConfig& _trace_config)102     void TraceLog::start(const TraceConfig& _trace_config) {
103         std::lock_guard<TraceLog> lh(*this);
104         start(lh, _trace_config);
105     }
106 
start(std::lock_guard<TraceLog>& lh, const TraceConfig& _trace_config)107     void TraceLog::start(std::lock_guard<TraceLog>& lh,
108                          const TraceConfig& _trace_config) {
109         trace_config = _trace_config;
110 
111         size_t buffer_size = trace_config.getBufferSize() / sizeof(TraceChunk);
112         if (buffer_size == 0) {
113             throw std::invalid_argument(
114                 "Cannot specify a buffer size less than a single chunk (" +
115                 std::to_string(sizeof(TraceChunk)) + " bytes)");
116         }
117 
118         if (enabled) {
119             stop(lh);
120         }
121 
122         buffer = trace_config.getBufferFactory()(generation++, buffer_size);
123         registry.updateEnabled(trace_config.getEnabledCategories(),
124                                trace_config.getDisabledCategories());
125         clearDeregisteredThreads();
126         enabled.store(true);
127     }
128 
    /**
     * Looks up the atomic enable/disable status entry for the given
     * category group in the category registry.
     *
     * NOTE(review): callers appear to cache the returned reference, so
     * the registry presumably guarantees stable addresses for status
     * entries — confirm against CategoryRegistry::getStatus.
     */
    const AtomicCategoryStatus& TraceLog::getCategoryStatus(
        const char* category_group) {
        return registry.getStatus(category_group);
    }
133 
getBuffer()134     std::unique_ptr<TraceBuffer> TraceLog::getBuffer() {
135         std::lock_guard<TraceLog> lh(*this);
136         return getBuffer(lh);
137     }
138 
getBuffer( std::lock_guard<TraceLog>&)139     std::unique_ptr<TraceBuffer> TraceLog::getBuffer(
140         std::lock_guard<TraceLog>&) {
141         if (enabled) {
142             throw std::logic_error(
143                 "phosphor::TraceLog::getBuffer: Cannot get the current "
144                 "TraceBuffer while logging is enabled");
145         }
146         return std::move(buffer);
147     }
148 
getTraceContext()149     TraceContext TraceLog::getTraceContext() {
150         std::lock_guard<TraceLog> lh(*this);
151         return getTraceContext(lh);
152     }
153 
getTraceContext(std::lock_guard<TraceLog>&)154     TraceContext TraceLog::getTraceContext(std::lock_guard<TraceLog>&) {
155         if (enabled) {
156             throw std::logic_error(
157                     "phosphor::TraceLog::getTraceContext: Cannot get the "
158                             "TraceContext while logging is enabled");
159         }
160         return TraceContext(std::move(buffer), thread_names);
161     }
162 
isEnabled() const163     bool TraceLog::isEnabled() const {
164         return enabled;
165     }
166 
registerThread(const std::string& thread_name)167     void TraceLog::registerThread(const std::string& thread_name) {
168         std::lock_guard<TraceLog> lh(*this);
169 
170         if (thread_chunk.initialised) {
171             throw std::logic_error("TraceLog::registerThread: Thread is "
172                                    "already registered");
173         }
174 
175         thread_chunk.initialised = true;
176         registered_chunk_tenants.insert(&thread_chunk);
177 
178         if (thread_name != "") {
179             // Unconditionally set the name of the thread, even for the unlikely
180             // event that it is already there.
181             thread_names[platform::getCurrentThreadIDCached()] = thread_name;
182 
183             // Make sure we don't remove our newly registered thread if we
184             // happened to reuse the TID of a thread that's been deregistered.
185             deregistered_threads.erase(platform::getCurrentThreadIDCached());
186         }
187     }
188 
    /**
     * Deregisters the calling thread: returns any outstanding chunk to
     * the buffer, removes the thread's ChunkTenant from the registered
     * set, and disposes of (or defers disposal of) its recorded name.
     *
     * @throws std::logic_error if this thread was never registered
     */
    void TraceLog::deregisterThread() {
        std::lock_guard<TraceLog> lh(*this);

        if (!thread_chunk.initialised) {
            throw std::logic_error(
                "phosphor::TraceLog::deregisterThread: This thread has "
                "not been previously registered");
        }

        if (thread_chunk.chunk) {
            // The buffer may already have been handed out via getBuffer /
            // getTraceContext; in that case the chunk is simply dropped.
            if (buffer) {
                buffer->returnChunk(*thread_chunk.chunk);
            }
            thread_chunk.chunk = nullptr;
        }
        registered_chunk_tenants.erase(&thread_chunk);
        thread_chunk.initialised = false;

        if (isEnabled()) {
            // Tracing is live: keep the name (it may appear in the trace)
            // and remember the TID so a later start() can purge it.
            deregistered_threads.emplace(platform::getCurrentThreadIDCached());
        } else {
            thread_names.erase(platform::getCurrentThreadIDCached());
        }
    }
213 
    /**
     * Returns a copy of the current trace configuration.
     *
     * Locks the underlying mutex directly (rather than
     * lock_guard<TraceLog>) because this member function is const.
     */
    TraceConfig TraceLog::getTraceConfig() const {
        std::lock_guard<std::mutex> lh(mutex);
        return trace_config;
    }
218 
getStats(StatsCallback& addStats) const219     void TraceLog::getStats(StatsCallback& addStats) const {
220         std::lock_guard<std::mutex> lh(mutex);
221         using gsl_p::make_span;
222 
223         registry.getStats(addStats);
224         if (buffer) {
225             buffer->getStats(addStats);
226         }
227 
228         addStats("log_is_enabled", isEnabled());
229         addStats("log_has_buffer", buffer != nullptr);
230         addStats("log_thread_names", thread_names.size());
231         addStats("log_deregistered_threads", deregistered_threads.size());
232         addStats("log_registered_tenants", registered_chunk_tenants.size());
233     }
234 
    /**
     * Acquires the calling thread's ChunkTenant lock and ensures the
     * tenant holds a usable (non-full) chunk for event writing.
     *
     * @return a locked unique_lock over the tenant on success, or a
     *         default-constructed (unlocked) lock when tracing is
     *         stopping or no chunk could be obtained — callers must
     *         check the lock before using the tenant.
     */
    std::unique_lock<ChunkTenant> TraceLog::getChunkTenant() {
        std::unique_lock<ChunkTenant> cl{thread_chunk, std::try_to_lock};

        // If we didn't acquire the lock then we're stopping so bail out
        if (!cl) {
            return {};
        }

        if (!thread_chunk.chunk || thread_chunk.chunk->isFull()) {
            // If we're missing our chunk then it might be because we're
            // meant to be stopping right now.
            if (!enabled) {
                return {};
            }

            if (!replaceChunk(thread_chunk)) {
                // Buffer exhausted: note which generation filled up, drop
                // our tenant lock first (maybe_stop takes the log lock,
                // and stop() in turn takes tenant locks), then try to
                // stop tracing for that generation.
                size_t current = generation;
                cl.unlock();
                maybe_stop(current);
                return {};
            }
        }

        return cl;
    }
260 
replaceChunk(ChunkTenant& ct)261     bool TraceLog::replaceChunk(ChunkTenant& ct) {
262         // Temporary addition until ChunkTenant initialisation
263         // is guaranteed by C++11 `thread_local`
264         if (!ct.initialised) {
265             registerThread();
266         }
267 
268         if (ct.chunk) {
269             buffer->returnChunk(*ct.chunk);
270             ct.chunk = nullptr;
271         }
272         return enabled && buffer && (ct.chunk = buffer->getChunk());
273     }
274 
    /**
     * Strips the chunk pointer from every registered ChunkTenant so no
     * thread keeps writing into the buffer after tracing stops.
     *
     * Called with the TraceLog lock held (witnessed by `lh`). Each
     * tenant's master lock is taken briefly to synchronise with any
     * thread currently inside getChunkTenant().
     */
    void TraceLog::evictThreads(std::lock_guard<TraceLog>& lh) {
        for (auto* chunk_tenant : registered_chunk_tenants) {
            // Manual lock/unlock is acceptable here: only a pointer
            // assignment happens under the lock, which cannot throw.
            chunk_tenant->lck.master().lock();
            chunk_tenant->chunk = nullptr;
            chunk_tenant->lck.master().unlock();
        }
    }
282 
clearDeregisteredThreads()283     void TraceLog::clearDeregisteredThreads() {
284         for (const auto& tid : deregistered_threads) {
285             thread_names.erase(tid);
286         }
287     }
288 
289 }
290