2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
4 * Licensed under the Flora License, Version 1.1 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://floralicense.org/license/
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
/* Returns microseconds since epoch.
 * NOTE(review): high_resolution_clock's epoch is not guaranteed to be the
 * Unix epoch on every platform — confirm, or switch to system_clock if the
 * absolute date matters (format_timestamp assumes Unix epoch). */
uint64_t timestamp_now()
{
    return std::chrono::duration_cast<std::chrono::microseconds>(
               std::chrono::high_resolution_clock::now().time_since_epoch())
        .count();
}
/* I want [2016-10-13 00:01:23.528514] — i.e. "[YYYY-mm-dd HH:MM:SS.uuuuuu]".
 * @param os        destination stream
 * @param timestamp microseconds since the Unix epoch (UTC) */
void format_timestamp(std::ostream& os, uint64_t timestamp)
{
    // The straightforward chrono round-trip
    // (microseconds -> time_point -> to_time_t) does not work on MSVC,
    // so split the value manually: whole seconds for gmtime, the
    // sub-second remainder printed separately.
    std::time_t time_t = timestamp / 1000000;
    // NOTE(review): std::gmtime returns a pointer to a shared static
    // buffer and is not thread-safe — confirm this is only reached from
    // the single writer thread, or use gmtime_r/gmtime_s per platform.
    auto gmtime = std::gmtime(&time_t);
    char buffer[32];
    strftime(buffer, 32, "%Y-%m-%d %T.", gmtime);
    char microseconds[7];  // 6 digits + NUL
    snprintf(microseconds, 7, "%06llu",
             (long long unsigned int)timestamp % 1000000);
    os << '[' << buffer << microseconds << ']';
}
/* Returns the calling thread's id, cached in a thread_local constant so
 * repeated calls avoid re-querying std::this_thread::get_id(). */
std::thread::id this_thread_id()
{
    static thread_local const std::thread::id id = std::this_thread::get_id();
    return id;
}
/* Compile-time index of type T within a std::tuple type list:
 * TupleIndex<T, std::tuple<A, B, T, ...>>::value is the 0-based position
 * of the first occurrence of T. Used to derive the one-byte type id
 * written ahead of each encoded log argument. */
template <typename T, typename Tuple>
struct TupleIndex;

/* Base case: T heads the tuple — index 0. */
template <typename T, typename... Types>
struct TupleIndex<T, std::tuple<T, Types...> > {
    static constexpr const std::size_t value = 0;
};

/* Recursive case: skip the head U and add one. */
template <typename T, typename U, typename... Types>
struct TupleIndex<T, std::tuple<U, Types...> > {
    static constexpr const std::size_t value =
        1 + TupleIndex<T, std::tuple<Types...> >::value;
};
76 } // anonymous namespace
78 typedef std::tuple<char, uint32_t, uint64_t, int32_t, int64_t, double,
79 VDebug::string_literal_t, char*>
// Maps a LogLevel to its printable name (body not visible in this extract).
82 char const* to_string(LogLevel loglevel)
// Raw encoder: copies `arg`'s bytes to the current write position and
// advances m_bytes_used. Caller must have reserved capacity first.
97 template <typename Arg>
98 void VDebug::encode(Arg arg)
100 *reinterpret_cast<Arg*>(buffer()) = arg;
101 m_bytes_used += sizeof(Arg);
// Typed encoder: reserves space for the one-byte type id plus the
// argument, then writes the id followed by the value.
104 template <typename Arg>
105 void VDebug::encode(Arg arg, uint8_t type_id)
107 resize_buffer_if_needed(sizeof(Arg) + sizeof(uint8_t));
108 encode<uint8_t>(type_id);
// Constructor: writes the fixed log-line header (timestamp, thread id,
// file, function, line, level) into the stack buffer in decode order —
// stringify() below reads these fields back in the same sequence.
112 VDebug::VDebug(LogLevel level, char const* file, char const* function,
114 : m_bytes_used(0), m_buffer_size(sizeof(m_stack_buffer))
116 encode<uint64_t>(timestamp_now());
117 encode<std::thread::id>(this_thread_id());
118 encode<string_literal_t>(string_literal_t(file));
119 encode<string_literal_t>(string_literal_t(function));
120 encode<uint32_t>(line);
121 encode<LogLevel>(level);
// NOTE(review): INFO level gets special handling here; the branch body
// is not visible in this extract.
122 if (level == LogLevel::INFO) {
129 VDebug::~VDebug() = default;
// Decodes the binary log line back to text: reads the fixed header
// fields in the exact order the constructor wrote them, prints the
// "[ts][level][tid][file:func:line]" prefix, then hands the variable
// argument section to the type-dispatching stringify overload.
131 void VDebug::stringify(std::ostream& os)
// Buffer lives on the stack until a heap buffer was allocated.
133 char* b = !m_heap_buffer ? m_stack_buffer : m_heap_buffer.get();
134 char const* const end = b + m_bytes_used;
135 uint64_t timestamp = *reinterpret_cast<uint64_t*>(b);
136 b += sizeof(uint64_t);
137 std::thread::id threadid = *reinterpret_cast<std::thread::id*>(b);
138 b += sizeof(std::thread::id);
139 string_literal_t file = *reinterpret_cast<string_literal_t*>(b);
140 b += sizeof(string_literal_t);
141 string_literal_t function = *reinterpret_cast<string_literal_t*>(b);
142 b += sizeof(string_literal_t);
143 uint32_t line = *reinterpret_cast<uint32_t*>(b);
144 b += sizeof(uint32_t);
145 LogLevel loglevel = *reinterpret_cast<LogLevel*>(b);
146 b += sizeof(LogLevel);
148 format_timestamp(os, timestamp);
150 os << '[' << to_string(loglevel) << ']' << '[' << threadid << ']' << '['
151 << file.m_s << ':' << function.m_s << ':' << line << "] ";
// Decode the remaining [type-id, value] pairs.
154 stringify(os, b, end);
// Critical messages are flushed immediately so they survive a crash.
157 if (loglevel >= LogLevel::CRIT) os.flush();
// Generic decoder: reinterprets the next sizeof(Arg) bytes as Arg,
// streams it, and returns the advanced read pointer. The dummy pointer
// parameter only selects the overload.
160 template <typename Arg>
161 char* decode(std::ostream& os, char* b, Arg* dummy)
163 Arg arg = *reinterpret_cast<Arg*>(b);
165 return b + sizeof(Arg);
// Overload for string literals: the buffer stores only the pointer.
169 char* decode(std::ostream& os, char* b, VDebug::string_literal_t* dummy)
171 VDebug::string_literal_t s =
172 *reinterpret_cast<VDebug::string_literal_t*>(b);
174 return b + sizeof(VDebug::string_literal_t);
// Overload for inline C strings (body not visible in this extract).
178 char* decode(std::ostream& os, char* b, char** dummy)
// Recursive dispatcher: reads the leading one-byte type id and forwards
// to the decode overload matching that index in SupportedTypes. The
// case bodies between these lines are not visible in this extract.
187 void VDebug::stringify(std::ostream& os, char* start, char const* const end)
189 if (start == end) return;
191 int type_id = static_cast<int>(*start);
199 static_cast<std::tuple_element<0, SupportedTypes>::type*>(
207 static_cast<std::tuple_element<1, SupportedTypes>::type*>(
215 static_cast<std::tuple_element<2, SupportedTypes>::type*>(
223 static_cast<std::tuple_element<3, SupportedTypes>::type*>(
231 static_cast<std::tuple_element<4, SupportedTypes>::type*>(
239 static_cast<std::tuple_element<5, SupportedTypes>::type*>(
247 static_cast<std::tuple_element<6, SupportedTypes>::type*>(
255 static_cast<std::tuple_element<7, SupportedTypes>::type*>(
// Current write position: stack buffer until a heap buffer exists.
262 char* VDebug::buffer()
264 return !m_heap_buffer ? &m_stack_buffer[m_bytes_used]
265 : &(m_heap_buffer.get())[m_bytes_used];
// Grows the backing store on demand: first heap allocation is at least
// 512 bytes (copying the stack contents over); later growth doubles the
// capacity (or jumps straight to required_size if larger).
268 void VDebug::resize_buffer_if_needed(size_t additional_bytes)
270 size_t const required_size = m_bytes_used + additional_bytes;
272 if (required_size <= m_buffer_size) return;
274 if (!m_heap_buffer) {
275 m_buffer_size = std::max(static_cast<size_t>(512), required_size);
276 m_heap_buffer = std::make_unique<char[]>(m_buffer_size);
277 memcpy(m_heap_buffer.get(), m_stack_buffer, m_bytes_used);
281 std::max(static_cast<size_t>(2 * m_buffer_size), required_size);
282 std::unique_ptr<char[]> new_heap_buffer(new char[m_buffer_size]);
283 memcpy(new_heap_buffer.get(), m_heap_buffer.get(), m_bytes_used);
284 m_heap_buffer.swap(new_heap_buffer);
// C-string encoders: null pointers are silently skipped.
288 void VDebug::encode(char const* arg)
290 if (arg != nullptr) encode_c_string(arg, strlen(arg));
293 void VDebug::encode(char* arg)
295 if (arg != nullptr) encode_c_string(arg, strlen(arg));
// Copies a C string inline: one type-id byte, then the characters
// including the terminating NUL (hence the 1 + length + 1 sizing).
298 void VDebug::encode_c_string(char const* arg, size_t length)
300 if (length == 0) return;
302 resize_buffer_if_needed(1 + length + 1);
304 auto type_id = TupleIndex<char*, SupportedTypes>::value;
305 *reinterpret_cast<uint8_t*>(b++) = static_cast<uint8_t>(type_id);
306 memcpy(b, arg, length + 1);
307 m_bytes_used += 1 + length + 1;
// String literals are stored by pointer, not copied — the literal must
// outlive the log line (true for compile-time literals).
310 void VDebug::encode(string_literal_t arg)
312 encode<string_literal_t>(
313 arg, TupleIndex<string_literal_t, SupportedTypes>::value);
// Streaming operators: each encodes its argument with the matching
// SupportedTypes type id and returns *this for chaining.
// std::string contents are copied inline (the string may be temporary).
316 VDebug& VDebug::operator<<(std::string const& arg)
318 encode_c_string(arg.c_str(), arg.length());
322 VDebug& VDebug::operator<<(int32_t arg)
324 encode<int32_t>(arg, TupleIndex<int32_t, SupportedTypes>::value);
328 VDebug& VDebug::operator<<(uint32_t arg)
330 encode<uint32_t>(arg, TupleIndex<uint32_t, SupportedTypes>::value);
// The fixed-width 64-bit overloads below were replaced by the long /
// unsigned long ones that follow (kept commented out in the original).
334 // VDebug& VDebug::operator<<(int64_t arg)
336 // encode < int64_t >(arg, TupleIndex < int64_t, SupportedTypes >::value);
340 // VDebug& VDebug::operator<<(uint64_t arg)
342 // encode < uint64_t >(arg, TupleIndex < uint64_t, SupportedTypes >::value);
345 VDebug& VDebug::operator<<(unsigned long arg)
347 encode<uint64_t>(arg, TupleIndex<uint64_t, SupportedTypes>::value);
351 VDebug& VDebug::operator<<(long arg)
353 encode<int64_t>(arg, TupleIndex<int64_t, SupportedTypes>::value);
357 VDebug& VDebug::operator<<(double arg)
359 encode<double>(arg, TupleIndex<double, SupportedTypes>::value);
363 VDebug& VDebug::operator<<(char arg)
365 encode<char>(arg, TupleIndex<char, SupportedTypes>::value);
// BufferBase: abstract producer/consumer queue interface shared by
// RingBuffer (lossy) and QueueBuffer (guaranteed).
370 virtual ~BufferBase() = default;
371 virtual void push(VDebug&& logline) = 0;
372 virtual bool try_pop(VDebug& logline) = 0;
// RAII spin lock over a caller-owned atomic_flag: busy-waits for the
// flag in the constructor, clears it in the destructor.
376 SpinLock(std::atomic_flag& flag) : m_flag(flag)
378 while (m_flag.test_and_set(std::memory_order_acquire))
382 ~SpinLock() { m_flag.clear(std::memory_order_release); }
385 std::atomic_flag& m_flag;
388 /* Multi Producer Single Consumer Ring Buffer */
389 class RingBuffer : public BufferBase {
// Each slot is padded to its own cache-line region (alignas(64),
// asserted to be 256 bytes below) to limit false sharing.
391 struct alignas(64) Item {
393 : flag(), written(0), logline(LogLevel::INFO, nullptr, nullptr, 0)
397 std::atomic_flag flag;
399 char padding[256 - sizeof(std::atomic_flag) - sizeof(char) -
// Slots are malloc'd raw and placement-new constructed so the buffer
// size can be chosen at runtime.
404 RingBuffer(size_t const size)
406 m_ring(static_cast<Item*>(std::malloc(size * sizeof(Item)))),
410 for (size_t i = 0; i < m_size; ++i) {
411 new (&m_ring[i]) Item();
413 static_assert(sizeof(Item) == 256, "Unexpected size != 256");
// Destructor must call each Item's destructor explicitly (placement
// new) before the malloc'd block is released.
416 ~RingBuffer() override
418 for (size_t i = 0; i < m_size; ++i) {
// push: producers claim a slot with a relaxed fetch_add, then take the
// slot's spin lock to move the line in. Because the index wraps, an
// unread slot can be overwritten — this buffer is deliberately lossy.
424 void push(VDebug&& logline) override
426 unsigned int write_index =
427 m_write_index.fetch_add(1, std::memory_order_relaxed) % m_size;
428 Item& item = m_ring[write_index];
429 SpinLock spinlock(item.flag);
430 item.logline = std::move(logline);
// try_pop: single consumer checks the `written` flag under the same
// per-slot spin lock; continuation not visible in this extract.
434 bool try_pop(VDebug& logline) override
436 Item& item = m_ring[m_read_index % m_size];
437 SpinLock spinlock(item.flag);
438 if (item.written == 1) {
439 logline = std::move(item.logline);
447 RingBuffer(RingBuffer const&) = delete;
448 RingBuffer& operator=(RingBuffer const&) = delete;
453 std::atomic<unsigned int> m_write_index;
455 unsigned int m_read_index;
// Fixed-size buffer segment used by QueueBuffer. Items are padded to
// 256 bytes (asserted below) to keep slots cache-line separated.
461 Item(VDebug&& logline) : logline(std::move(logline)) {}
462 char padding[256 - sizeof(VDebug)];
466 static constexpr const size_t size =
467 32768; // 8MB. Helps reduce memory fragmentation
// m_write_state has size+1 entries: one publish flag per slot plus a
// final slot-count accumulator at index `size` — hence i <= size here.
469 Buffer() : m_buffer(static_cast<Item*>(std::malloc(size * sizeof(Item))))
471 for (size_t i = 0; i <= size; ++i) {
472 m_write_state[i].store(0, std::memory_order_relaxed);
474 static_assert(sizeof(Item) == 256, "Unexpected size != 256");
// Destructor: only the slots actually written (per the accumulator)
// were placement-new constructed and need destroying.
479 unsigned int write_count = m_write_state[size].load();
480 for (size_t i = 0; i < write_count; ++i) {
486 // Returns true if we need to switch to next buffer
487 bool push(VDebug&& logline, unsigned int const write_index)
489 new (&m_buffer[write_index]) Item(std::move(logline));
// Publish the slot, then bump the total-written counter; the caller
// switches buffers once the segment is full.
490 m_write_state[write_index].store(1, std::memory_order_release);
491 return m_write_state[size].fetch_add(1, std::memory_order_acquire) +
// try_pop: succeeds only once the producer's release-store for this
// slot is visible (acquire load pairs with it).
496 bool try_pop(VDebug& logline, unsigned int const read_index)
498 if (m_write_state[read_index].load(std::memory_order_acquire)) {
499 Item& item = m_buffer[read_index];
500 logline = std::move(item.logline);
506 Buffer(Buffer const&) = delete;
507 Buffer& operator=(Buffer const&) = delete;
511 std::atomic<unsigned int> m_write_state[size + 1];
// Unbounded queue of fixed-size Buffer segments: the guaranteed
// (non-lossy) logger backend. Producers fill the current write segment;
// the single consumer drains segments in FIFO order.
514 class QueueBuffer : public BufferBase {
516 QueueBuffer(QueueBuffer const&) = delete;
517 QueueBuffer& operator=(QueueBuffer const&) = delete;
520 : m_current_read_buffer{nullptr},
525 setup_next_write_buffer();
// push: claim a slot index; if it falls inside the current segment,
// write there (and roll over to a fresh segment when this push filled
// it). If the segment was already full, spin until the rollover is
// visible, then retry recursively.
528 void push(VDebug&& logline) override
530 unsigned int write_index =
531 m_write_index.fetch_add(1, std::memory_order_relaxed);
532 if (write_index < Buffer::size) {
533 if (m_current_write_buffer.load(std::memory_order_acquire)
534 ->push(std::move(logline), write_index)) {
535 setup_next_write_buffer();
538 while (m_write_index.load(std::memory_order_acquire) >=
541 push(std::move(logline));
// try_pop: lazily binds to the front segment, reads sequentially, and
// retires the segment once m_read_index reaches its capacity. The
// retirement body between these lines is not visible in this extract.
545 bool try_pop(VDebug& logline) override
547 if (m_current_read_buffer == nullptr)
548 m_current_read_buffer = get_next_read_buffer();
550 Buffer* read_buffer = m_current_read_buffer;
552 if (read_buffer == nullptr) return false;
554 if (read_buffer->try_pop(logline, m_read_index)) {
556 if (m_read_index == Buffer::size) {
558 m_current_read_buffer = nullptr;
559 SpinLock spinlock(m_flag);
// Allocates a fresh segment, publishes it to producers, then appends
// it to the FIFO under the spin lock and resets the write index.
569 void setup_next_write_buffer()
571 std::unique_ptr<Buffer> next_write_buffer(new Buffer());
572 m_current_write_buffer.store(next_write_buffer.get(),
573 std::memory_order_release);
574 SpinLock spinlock(m_flag);
575 m_buffers.push(std::move(next_write_buffer));
576 m_write_index.store(0, std::memory_order_relaxed);
579 Buffer* get_next_read_buffer()
581 SpinLock spinlock(m_flag);
582 return m_buffers.empty() ? nullptr : m_buffers.front().get();
586 std::queue<std::unique_ptr<Buffer> > m_buffers;
587 std::atomic<Buffer*> m_current_write_buffer;
588 Buffer* m_current_read_buffer;
589 std::atomic<unsigned int> m_write_index;
590 std::atomic_flag m_flag;
591 unsigned int m_read_index;
// Writes decoded log lines to <directory><name>.<N>.txt, rolling to a
// new numbered file once the configured size is exceeded.
// NOTE(review): log_file_roll_size_mb * 1024 * 1024 is computed in
// uint32_t — values >= 4096 MB would overflow; confirm callers clamp.
596 FileWriter(std::string const& log_directory,
597 std::string const& log_file_name, uint32_t log_file_roll_size_mb)
598 : m_log_file_roll_size_bytes(log_file_roll_size_mb * 1024 * 1024),
599 m_name(log_directory + log_file_name)
// write: stringify the line into the stream and track bytes via tellp
// deltas; roll the file when the threshold is crossed (roll body not
// visible in this extract).
604 void write(VDebug& logline)
606 auto pos = m_os->tellp();
607 logline.stringify(*m_os);
608 m_bytes_written += m_os->tellp() - pos;
609 if (m_bytes_written > m_log_file_roll_size_bytes) {
// Opens the next numbered file, truncating any existing one.
623 m_os = std::make_unique<std::ofstream>();
624 // TODO Optimize this part. Does it even matter ?
625 std::string log_file_name = m_name;
626 log_file_name.append(".");
627 log_file_name.append(std::to_string(++m_file_number));
628 log_file_name.append(".txt");
629 m_os->open(log_file_name, std::ofstream::out | std::ofstream::trunc);
633 uint32_t m_file_number = 0;
634 std::streamoff m_bytes_written = 0;
635 uint32_t const m_log_file_roll_size_bytes;
636 std::string const m_name;
637 std::unique_ptr<std::ofstream> m_os;
// Background logger: owns the buffer (RingBuffer for the lossy
// NonGuaranteedLogger, QueueBuffer for GuaranteedLogger), the file
// writer, and the consumer thread running pop().
642 NanoLogger(NonGuaranteedLogger ngl, std::string const& log_directory,
643 std::string const& log_file_name, uint32_t log_file_roll_size_mb)
644 : m_state(State::INIT),
646 new RingBuffer(std::max(1u, ngl.ring_buffer_size_mb) * 1024 * 4)),
647 m_file_writer(log_directory, log_file_name,
648 std::max(1u, log_file_roll_size_mb)),
649 m_thread(&NanoLogger::pop, this)
// READY is published with release so the consumer's acquire loop in
// pop() also sees every store made during construction.
651 m_state.store(State::READY, std::memory_order_release);
654 NanoLogger(GuaranteedLogger gl, std::string const& log_directory,
655 std::string const& log_file_name, uint32_t log_file_roll_size_mb)
656 : m_state(State::INIT),
657 m_buffer_base(new QueueBuffer()),
658 m_file_writer(log_directory, log_file_name,
659 std::max(1u, log_file_roll_size_mb)),
660 m_thread(&NanoLogger::pop, this)
662 m_state.store(State::READY, std::memory_order_release);
// Destructor (signature not visible): signals shutdown so pop() drains
// and exits.
667 m_state.store(State::SHUTDOWN);
671 void add(VDebug&& logline) { m_buffer_base->push(std::move(logline)); }
// Consumer loop: wait for READY, then poll the buffer, sleeping 50us
// when idle; after SHUTDOWN, drain everything that remains.
675 // Wait for constructor to complete and pull all stores done there to
676 // this thread / core.
677 while (m_state.load(std::memory_order_acquire) == State::INIT)
678 std::this_thread::sleep_for(std::chrono::microseconds(50));
680 VDebug logline(LogLevel::INFO, nullptr, nullptr, 0);
682 while (m_state.load() == State::READY) {
683 if (m_buffer_base->try_pop(logline))
684 m_file_writer.write(logline);
686 std::this_thread::sleep_for(std::chrono::microseconds(50));
689 // Pop and log all remaining entries
690 while (m_buffer_base->try_pop(logline)) {
691 m_file_writer.write(logline);
696 enum class State { INIT, READY, SHUTDOWN };
698 std::atomic<State> m_state;
699 std::unique_ptr<BufferBase> m_buffer_base;
700 FileWriter m_file_writer;
701 std::thread m_thread;
// Global logger instance: the unique_ptr owns it, the atomic pointer
// gives lock-free access to producers after initialize() publishes it.
704 std::unique_ptr<NanoLogger> nanologger;
705 std::atomic<NanoLogger*> atomic_nanologger;
// Hands a finished log line to the background logger.
// NOTE(review): dereferences the atomic without a null check — logging
// before initialize() would crash; confirm call order is enforced.
707 bool VDebugServer::operator==(VDebug& logline)
709 atomic_nanologger.load(std::memory_order_acquire)->add(std::move(logline));
// Creates the lossy (ring-buffer) logger and publishes it.
713 void initialize(NonGuaranteedLogger ngl, std::string const& log_directory,
714 std::string const& log_file_name,
715 uint32_t log_file_roll_size_mb)
717 nanologger = std::make_unique<NanoLogger>(ngl, log_directory, log_file_name,
718 log_file_roll_size_mb);
719 atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
// Creates the guaranteed (queue-buffer) logger and publishes it.
722 void initialize(GuaranteedLogger gl, std::string const& log_directory,
723 std::string const& log_file_name,
724 uint32_t log_file_roll_size_mb)
726 nanologger = std::make_unique<NanoLogger>(gl, log_directory, log_file_name,
727 log_file_roll_size_mb);
728 atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
// Runtime severity threshold; a message is emitted when its level is
// >= the stored threshold.
731 std::atomic<unsigned int> loglevel = {0};
733 void set_log_level(LogLevel level)
735 loglevel.store(static_cast<unsigned int>(level), std::memory_order_release);
738 bool is_logged(LogLevel level)
740 return static_cast<unsigned int>(level) >=
741 loglevel.load(std::memory_order_relaxed);