15 /* Returns microseconds since epoch */
// NOTE(review): high_resolution_clock's epoch is not guaranteed to be the
// Unix epoch on every platform; std::chrono::system_clock is the portable
// choice for wall-clock timestamps -- confirm target toolchains.
16 uint64_t timestamp_now()
18 return std::chrono::duration_cast<std::chrono::microseconds>(
19 std::chrono::high_resolution_clock::now().time_since_epoch())
23 /* I want [2016-10-13 00:01:23.528514] */
// Formats a microseconds-since-epoch value as
// "[YYYY-MM-DD HH:MM:SS.uuuuuu]" (UTC) and streams it to `os`.
24 void format_timestamp(std::ostream& os, uint64_t timestamp)
26 // The next 3 lines do not work on MSVC!
27 // auto duration = std::chrono::microseconds(timestamp);
28 // std::chrono::high_resolution_clock::time_point time_point(duration);
29 // std::time_t time_t =
30 // std::chrono::high_resolution_clock::to_time_t(time_point);
// Whole seconds; the 6-digit microsecond remainder is appended separately.
31 std::time_t time_t = timestamp / 1000000;
// NOTE(review): std::gmtime returns a pointer to shared static storage and
// is not thread-safe -- presumably only the single writer thread calls
// this; confirm.
32 auto gmtime = std::gmtime(&time_t);
34 strftime(buffer, 32, "%Y-%m-%d %T.", gmtime);
// Zero-padded microseconds; the cast binds tighter than %, so this is
// (uint64 cast) % 1000000 -- the intended value. Output is at most 6 chars,
// so the (elided) `microseconds` buffer presumably holds >= 7 bytes.
36 sprintf(microseconds, "%06llu", (long long unsigned int)timestamp % 1000000);
37 os << '[' << buffer << microseconds << ']';
// Returns the calling thread's id, computed once per thread and cached in a
// thread_local so repeated log calls avoid the std::this_thread::get_id()
// call.
40 std::thread::id this_thread_id()
42 static thread_local const std::thread::id id = std::this_thread::get_id();
// Compile-time lookup of the index of type T within a std::tuple; the index
// is used as the one-byte type tag written ahead of each encoded value.
46 template <typename T, typename Tuple>
// Base case: T matches the head of the tuple -> index 0.
49 template <typename T, typename... Types>
50 struct TupleIndex<T, std::tuple<T, Types...> > {
51 static constexpr const std::size_t value = 0;
// Recursive case: head U != T, so skip it and add one.
54 template <typename T, typename U, typename... Types>
55 struct TupleIndex<T, std::tuple<U, Types...> > {
56 static constexpr const std::size_t value =
57 1 + TupleIndex<T, std::tuple<Types...> >::value;
60 } // anonymous namespace
// The closed set of encodable value types. A value's index in this tuple is
// the type tag stored in the buffer, so the order here must never change
// without also updating the decoder switch in stringify().
62 typedef std::tuple<char, uint32_t, uint64_t, int32_t, int64_t, double,
63 VDebug::string_literal_t, char*>
// Maps a LogLevel enumerator to its printable name for the "[LEVEL]" field.
66 char const* to_string(LogLevel loglevel)
81 template <typename Arg>
// Raw-copies `arg` into the buffer at the current write offset and advances
// the offset. NOTE(review): assumes the caller already reserved space and
// that Arg is trivially copyable and alignment-tolerant here -- confirm for
// every instantiation (std::thread::id in particular).
82 void VDebug::encode(Arg arg)
84 *reinterpret_cast<Arg*>(buffer()) = arg;
85 m_bytes_used += sizeof(Arg);
88 template <typename Arg>
// Writes a one-byte type tag followed by the raw value, reserving space for
// both up front so the untagged encode() calls stay in-bounds.
89 void VDebug::encode(Arg arg, uint8_t type_id)
91 resize_buffer_if_needed(sizeof(Arg) + sizeof(uint8_t));
92 encode<uint8_t>(type_id);
// Constructor encodes the fixed log-line header -- timestamp, thread id,
// source file/function, line, level -- in the exact order that
// stringify(std::ostream&) decodes it.
96 VDebug::VDebug(LogLevel level, char const* file, char const* function,
98 : m_bytes_used(0), m_buffer_size(sizeof(m_stack_buffer))
100 encode<uint64_t>(timestamp_now());
101 encode<std::thread::id>(this_thread_id());
102 encode<string_literal_t>(string_literal_t(file));
103 encode<string_literal_t>(string_literal_t(function));
104 encode<uint32_t>(line);
105 encode<LogLevel>(level);
// NOTE(review): the INFO-only branch body is elided in this view -- confirm
// what extra state INFO lines carry.
106 if (level == LogLevel::INFO) {
// Defaulted: the heap buffer (unique_ptr) releases itself; nothing else owns
// a resource.
113 VDebug::~VDebug() = default;
// Replays the encoded buffer: decodes the fixed header fields (in encode
// order), prints "[ts][LEVEL][tid][file:function:line] ", then hands the
// remaining tagged payload bytes to the three-argument stringify overload.
115 void VDebug::stringify(std::ostream& os)
// Read from the stack buffer unless the line spilled to the heap.
117 char* b = !m_heap_buffer ? m_stack_buffer : m_heap_buffer.get();
118 char const* const end = b + m_bytes_used;
119 uint64_t timestamp = *reinterpret_cast<uint64_t*>(b);
120 b += sizeof(uint64_t);
121 std::thread::id threadid = *reinterpret_cast<std::thread::id*>(b);
122 b += sizeof(std::thread::id);
123 string_literal_t file = *reinterpret_cast<string_literal_t*>(b);
124 b += sizeof(string_literal_t);
125 string_literal_t function = *reinterpret_cast<string_literal_t*>(b);
126 b += sizeof(string_literal_t);
127 uint32_t line = *reinterpret_cast<uint32_t*>(b);
128 b += sizeof(uint32_t);
129 LogLevel loglevel = *reinterpret_cast<LogLevel*>(b);
130 b += sizeof(LogLevel);
132 format_timestamp(os, timestamp);
134 os << '[' << to_string(loglevel) << ']' << '[' << threadid << ']' << '['
135 << file.m_s << ':' << function.m_s << ':' << line << "] ";
138 stringify(os, b, end);
// Critical (and above) messages are flushed immediately so they survive a
// crash shortly after logging.
141 if (loglevel >= LogLevel::CRIT) os.flush();
144 template <typename Arg>
// Generic payload decoder: reads one raw Arg at `b`, streams it (elided
// here), and returns the cursor advanced past it. `dummy` exists only to
// select this overload by pointee type.
145 char* decode(std::ostream& os, char* b, Arg* dummy)
147 Arg arg = *reinterpret_cast<Arg*>(b);
149 return b + sizeof(Arg);
// Overload for string literals: only the pointer was encoded (the literal
// itself has static storage duration), so decoding is a pointer read.
153 char* decode(std::ostream& os, char* b, VDebug::string_literal_t* dummy)
155 VDebug::string_literal_t s =
156 *reinterpret_cast<VDebug::string_literal_t*>(b);
158 return b + sizeof(VDebug::string_literal_t);
// Overload for inline C strings: the characters were copied into the buffer
// by encode_c_string, so this streams them in place (body elided here).
162 char* decode(std::ostream& os, char* b, char** dummy)
// Recursive payload dispatcher: reads the one-byte type tag at `start` and
// forwards to decode() instantiated with the matching SupportedTypes element
// (case bodies elided here). The tag values are the tuple indices written by
// TupleIndex at encode time, so the two lists must stay in lock-step.
171 void VDebug::stringify(std::ostream& os, char* start, char const* const end)
173 if (start == end) return;
175 int type_id = static_cast<int>(*start);
183 static_cast<std::tuple_element<0, SupportedTypes>::type*>(
191 static_cast<std::tuple_element<1, SupportedTypes>::type*>(
199 static_cast<std::tuple_element<2, SupportedTypes>::type*>(
207 static_cast<std::tuple_element<3, SupportedTypes>::type*>(
215 static_cast<std::tuple_element<4, SupportedTypes>::type*>(
223 static_cast<std::tuple_element<5, SupportedTypes>::type*>(
231 static_cast<std::tuple_element<6, SupportedTypes>::type*>(
239 static_cast<std::tuple_element<7, SupportedTypes>::type*>(
// Returns the current write position: offset m_bytes_used into whichever
// backing store (stack or heap) is active.
246 char* VDebug::buffer()
248 return !m_heap_buffer ? &m_stack_buffer[m_bytes_used]
249 : &(m_heap_buffer.get())[m_bytes_used];
// Grows the backing store when the next encode would overflow it: the first
// spill moves from the stack buffer to a heap allocation of at least 512
// bytes; later growth doubles (or jumps straight to required_size) and
// copies the bytes already encoded.
252 void VDebug::resize_buffer_if_needed(size_t additional_bytes)
254 size_t const required_size = m_bytes_used + additional_bytes;
256 if (required_size <= m_buffer_size) return;
258 if (!m_heap_buffer) {
259 m_buffer_size = std::max(static_cast<size_t>(512), required_size);
260 m_heap_buffer = std::make_unique<char[]>(m_buffer_size);
// Preserve everything already encoded on the stack.
261 memcpy(m_heap_buffer.get(), m_stack_buffer, m_bytes_used);
265 std::max(static_cast<size_t>(2 * m_buffer_size), required_size);
266 std::unique_ptr<char[]> new_heap_buffer(new char[m_buffer_size]);
267 memcpy(new_heap_buffer.get(), m_heap_buffer.get(), m_bytes_used);
268 m_heap_buffer.swap(new_heap_buffer);
// C-string entry point (const): null pointers are silently dropped; the
// characters themselves are copied by encode_c_string.
272 void VDebug::encode(char const* arg)
274 if (arg != nullptr) encode_c_string(arg, strlen(arg));
// Non-const overload, same behavior: null pointers are silently dropped.
277 void VDebug::encode(char* arg)
279 if (arg != nullptr) encode_c_string(arg, strlen(arg));
// Encodes a NUL-terminated string inline: one type-tag byte, then the
// characters, then the terminating NUL. Empty strings are dropped entirely.
282 void VDebug::encode_c_string(char const* arg, size_t length)
284 if (length == 0) return;
// Reserve tag + characters + trailing NUL before any write.
286 resize_buffer_if_needed(1 + length + 1);
288 auto type_id = TupleIndex<char*, SupportedTypes>::value;
289 *reinterpret_cast<uint8_t*>(b++) = static_cast<uint8_t>(type_id);
// length + 1 also copies the trailing NUL, which the decoder relies on.
290 memcpy(b, arg, length + 1);
291 m_bytes_used += 1 + length + 1;
// String literals are encoded by pointer only (no character copy), tagged
// with the string_literal_t tuple index.
294 void VDebug::encode(string_literal_t arg)
296 encode<string_literal_t>(
297 arg, TupleIndex<string_literal_t, SupportedTypes>::value);
// std::string is copied into the buffer as a tagged C string (the log line
// may outlive the caller's string).
300 VDebug& VDebug::operator<<(std::string const& arg)
302 encode_c_string(arg.c_str(), arg.length());
// Tagged raw encode of a 32-bit signed integer.
306 VDebug& VDebug::operator<<(int32_t arg)
308 encode<int32_t>(arg, TupleIndex<int32_t, SupportedTypes>::value);
// Tagged raw encode of a 32-bit unsigned integer.
312 VDebug& VDebug::operator<<(uint32_t arg)
314 encode<uint32_t>(arg, TupleIndex<uint32_t, SupportedTypes>::value);
// Kept for reference: these fixed-width 64-bit overloads are superseded by
// the long / unsigned long overloads below, which encode the same
// int64_t / uint64_t type tags without overload ambiguity.
318 // VDebug& VDebug::operator<<(int64_t arg)
320 // encode < int64_t >(arg, TupleIndex < int64_t, SupportedTypes >::value);
324 // VDebug& VDebug::operator<<(uint64_t arg)
326 // encode < uint64_t >(arg, TupleIndex < uint64_t, SupportedTypes >::value);
// unsigned long is widened and stored under the uint64_t tag.
329 VDebug& VDebug::operator<<(unsigned long arg)
331 encode<uint64_t>(arg, TupleIndex<uint64_t, SupportedTypes>::value);
// long is widened and stored under the int64_t tag.
335 VDebug& VDebug::operator<<(long arg)
337 encode<int64_t>(arg, TupleIndex<int64_t, SupportedTypes>::value);
// Tagged raw encode of a double.
341 VDebug& VDebug::operator<<(double arg)
343 encode<double>(arg, TupleIndex<double, SupportedTypes>::value);
// Tagged raw encode of a single character.
347 VDebug& VDebug::operator<<(char arg)
349 encode<char>(arg, TupleIndex<char, SupportedTypes>::value);
// Interface shared by the lossy ring buffer and the lossless queue buffer;
// push() is called by many producers, try_pop() by the single writer thread.
354 virtual ~BufferBase() = default;
355 virtual void push(VDebug&& logline) = 0;
356 virtual bool try_pop(VDebug& logline) = 0;
// RAII spin lock over a caller-owned std::atomic_flag: busy-waits with
// acquire semantics on construction, clears with release on destruction.
360 SpinLock(std::atomic_flag& flag) : m_flag(flag)
362 while (m_flag.test_and_set(std::memory_order_acquire))
366 ~SpinLock() { m_flag.clear(std::memory_order_release); }
// Non-owning reference; the flag must outlive the lock.
369 std::atomic_flag& m_flag;
372 /* Multi Producer Single Consumer Ring Buffer */
// Fixed-capacity ring of 256-byte Items. Producers claim slots with an
// atomic fetch_add; a per-slot atomic_flag spinlock arbitrates between the
// claiming producer and the single consumer.
373 class RingBuffer : public BufferBase {
// Each Item is 64-byte aligned and padded so sizeof(Item) == 256 (asserted
// below), keeping slots on distinct cache lines.
375 struct alignas(64) Item {
377 : flag(), written(0), logline(LogLevel::INFO, nullptr, nullptr, 0)
381 std::atomic_flag flag;
383 char padding[256 - sizeof(std::atomic_flag) - sizeof(char) -
// Allocates one raw block and placement-news every Item into it.
388 RingBuffer(size_t const size)
390 m_ring(static_cast<Item*>(std::malloc(size * sizeof(Item)))),
394 for (size_t i = 0; i < m_size; ++i) {
395 new (&m_ring[i]) Item();
397 static_assert(sizeof(Item) == 256, "Unexpected size != 256");
// Explicitly destroys each placement-new'd Item (freeing of the raw block
// is elided in this view -- confirm std::free(m_ring) follows the loop).
400 ~RingBuffer() override
402 for (size_t i = 0; i < m_size; ++i) {
// Producer: claim the next slot (wrapping), lock it, move the line in.
// NOTE(review): a producer lapping the consumer overwrites an unread entry
// -- this is the deliberate "non-guaranteed" trade-off of this buffer.
408 void push(VDebug&& logline) override
410 unsigned int write_index =
411 m_write_index.fetch_add(1, std::memory_order_relaxed) % m_size;
412 Item& item = m_ring[write_index];
413 SpinLock spinlock(item.flag);
414 item.logline = std::move(logline);
// Consumer: single reader polls its current slot and consumes only when the
// slot's `written` marker is set.
418 bool try_pop(VDebug& logline) override
420 Item& item = m_ring[m_read_index % m_size];
421 SpinLock spinlock(item.flag);
422 if (item.written == 1) {
423 logline = std::move(item.logline);
// Non-copyable: owns raw malloc'd storage.
431 RingBuffer(RingBuffer const&) = delete;
432 RingBuffer& operator=(RingBuffer const&) = delete;
437 std::atomic<unsigned int> m_write_index;
// Only touched by the single consumer thread, so no atomic is needed.
439 unsigned int m_read_index;
// One fixed-size chunk used by QueueBuffer. m_write_state[i] flags slot i
// as fully written; the extra final slot m_write_state[size] doubles as a
// count of completed writes.
445 Item(VDebug&& logline) : logline(std::move(logline)) {}
446 char padding[256 - sizeof(VDebug)];
450 static constexpr const size_t size =
451 32768; // 8MB. Helps reduce memory fragmentation
// Raw-allocates the Item array; Items are placement-new'd lazily in push().
453 Buffer() : m_buffer(static_cast<Item*>(std::malloc(size * sizeof(Item))))
// <= is intentional: m_write_state has size + 1 entries (flags + counter).
455 for (size_t i = 0; i <= size; ++i) {
456 m_write_state[i].store(0, std::memory_order_relaxed);
458 static_assert(sizeof(Item) == 256, "Unexpected size != 256");
// Destructor destroys only the Items that were actually constructed, using
// the completed-write counter in the final m_write_state slot.
463 unsigned int write_count = m_write_state[size].load();
464 for (size_t i = 0; i < write_count; ++i) {
470 // Returns true if we need to switch to next buffer
471 bool push(VDebug&& logline, unsigned int const write_index)
473 new (&m_buffer[write_index]) Item(std::move(logline));
474 m_write_state[write_index].store(1, std::memory_order_release);
// The writer whose completed write is the last slot reports "buffer full".
475 return m_write_state[size].fetch_add(1, std::memory_order_acquire) +
// Consumes slot read_index only if its write flag is already published.
480 bool try_pop(VDebug& logline, unsigned int const read_index)
482 if (m_write_state[read_index].load(std::memory_order_acquire)) {
483 Item& item = m_buffer[read_index];
484 logline = std::move(item.logline);
// Non-copyable: owns raw malloc'd storage.
490 Buffer(Buffer const&) = delete;
491 Buffer& operator=(Buffer const&) = delete;
495 std::atomic<unsigned int> m_write_state[size + 1];
// Unbounded MPSC buffer backing GuaranteedLogger: a queue of fixed-size
// Buffers. Writers claim indices with fetch_add; the writer that fills a
// Buffer installs the next one. No log line is ever dropped.
498 class QueueBuffer : public BufferBase {
500 QueueBuffer(QueueBuffer const&) = delete;
501 QueueBuffer& operator=(QueueBuffer const&) = delete;
504 : m_current_read_buffer{nullptr},
509 setup_next_write_buffer();
// Producer: claim a slot; if the claimed index is past the end of the
// current Buffer, spin until another writer installs a fresh one, then
// retry recursively.
512 void push(VDebug&& logline) override
514 unsigned int write_index =
515 m_write_index.fetch_add(1, std::memory_order_relaxed);
516 if (write_index < Buffer::size) {
517 if (m_current_write_buffer.load(std::memory_order_acquire)
518 ->push(std::move(logline), write_index)) {
// This writer completed the final slot, so it performs the swap.
519 setup_next_write_buffer();
522 while (m_write_index.load(std::memory_order_acquire) >=
525 push(std::move(logline));
// Single consumer: drain the front Buffer; when fully read, discard it
// (under the queue spinlock) and move to the next queued Buffer.
529 bool try_pop(VDebug& logline) override
531 if (m_current_read_buffer == nullptr)
532 m_current_read_buffer = get_next_read_buffer();
534 Buffer* read_buffer = m_current_read_buffer;
536 if (read_buffer == nullptr) return false;
538 if (read_buffer->try_pop(logline, m_read_index)) {
540 if (m_read_index == Buffer::size) {
542 m_current_read_buffer = nullptr;
543 SpinLock spinlock(m_flag);
// Publishes a fresh Buffer and resets the shared write index. The spinlock
// guards only the std::queue; the atomics publish on their own.
553 void setup_next_write_buffer()
555 std::unique_ptr<Buffer> next_write_buffer(new Buffer());
556 m_current_write_buffer.store(next_write_buffer.get(),
557 std::memory_order_release);
558 SpinLock spinlock(m_flag);
559 m_buffers.push(std::move(next_write_buffer));
560 m_write_index.store(0, std::memory_order_relaxed);
563 Buffer* get_next_read_buffer()
565 SpinLock spinlock(m_flag);
566 return m_buffers.empty() ? nullptr : m_buffers.front().get();
570 std::queue<std::unique_ptr<Buffer> > m_buffers;
571 std::atomic<Buffer*> m_current_write_buffer;
// Consumer-only state; no synchronization needed.
572 Buffer* m_current_read_buffer;
573 std::atomic<unsigned int> m_write_index;
574 std::atomic_flag m_flag;
575 unsigned int m_read_index;
// Writes formatted log lines to <log_directory><log_file_name>.<N>.txt and
// rolls to the next numbered file once the byte limit is exceeded.
580 FileWriter(std::string const& log_directory,
581 std::string const& log_file_name, uint32_t log_file_roll_size_mb)
// NOTE(review): the uint32_t MB->bytes product overflows for roll sizes
// >= 4096 MB -- confirm callers cap this value.
582 : m_log_file_roll_size_bytes(log_file_roll_size_mb * 1024 * 1024),
583 m_name(log_directory + log_file_name)
// Tracks bytes via stream positions so rolling needs no extra bookkeeping
// inside VDebug::stringify.
588 void write(VDebug& logline)
590 auto pos = m_os->tellp();
591 logline.stringify(*m_os);
592 m_bytes_written += m_os->tellp() - pos;
593 if (m_bytes_written > m_log_file_roll_size_bytes) {
// Opens the next numbered log file, truncating any existing file of the
// same name.
607 m_os = std::make_unique<std::ofstream>();
608 // TODO Optimize this part. Does it even matter ?
609 std::string log_file_name = m_name;
610 log_file_name.append(".");
611 log_file_name.append(std::to_string(++m_file_number));
612 log_file_name.append(".txt");
613 m_os->open(log_file_name, std::ofstream::out | std::ofstream::trunc);
617 uint32_t m_file_number = 0;
618 std::streamoff m_bytes_written = 0;
619 uint32_t const m_log_file_roll_size_bytes;
620 std::string const m_name;
621 std::unique_ptr<std::ofstream> m_os;
// Orchestrator: owns the chosen buffer implementation, the file writer and
// the single consumer thread. m_state gates the pop loop's startup (INIT ->
// READY) and shutdown (-> SHUTDOWN).
// Non-guaranteed variant: lossy RingBuffer sized from the requested MB
// (each slot is 256 bytes, hence the * 1024 * 4 ~= slots per MB).
626 NanoLogger(NonGuaranteedLogger ngl, std::string const& log_directory,
627 std::string const& log_file_name, uint32_t log_file_roll_size_mb)
628 : m_state(State::INIT),
630 new RingBuffer(std::max(1u, ngl.ring_buffer_size_mb) * 1024 * 4)),
631 m_file_writer(log_directory, log_file_name,
632 std::max(1u, log_file_roll_size_mb)),
633 m_thread(&NanoLogger::pop, this)
// Release-publish READY so the pop thread sees all constructor stores.
635 m_state.store(State::READY, std::memory_order_release);
// Guaranteed variant: lossless QueueBuffer.
638 NanoLogger(GuaranteedLogger gl, std::string const& log_directory,
639 std::string const& log_file_name, uint32_t log_file_roll_size_mb)
640 : m_state(State::INIT),
641 m_buffer_base(new QueueBuffer()),
642 m_file_writer(log_directory, log_file_name,
643 std::max(1u, log_file_roll_size_mb)),
644 m_thread(&NanoLogger::pop, this)
646 m_state.store(State::READY, std::memory_order_release);
// (destructor) Signal shutdown; the pop loop then drains and the thread is
// presumably joined in the elided lines -- confirm.
651 m_state.store(State::SHUTDOWN);
655 void add(VDebug&& logline) { m_buffer_base->push(std::move(logline)); }
// Consumer thread body: wait for READY, then poll-pop-write until SHUTDOWN,
// then drain everything that remains.
659 // Wait for constructor to complete and pull all stores done there to
660 // this thread / core.
661 while (m_state.load(std::memory_order_acquire) == State::INIT)
662 std::this_thread::sleep_for(std::chrono::microseconds(50));
// Reusable line object; try_pop move-assigns into it each iteration.
664 VDebug logline(LogLevel::INFO, nullptr, nullptr, 0);
666 while (m_state.load() == State::READY) {
667 if (m_buffer_base->try_pop(logline))
668 m_file_writer.write(logline);
670 std::this_thread::sleep_for(std::chrono::microseconds(50));
673 // Pop and log all remaining entries
674 while (m_buffer_base->try_pop(logline)) {
675 m_file_writer.write(logline);
680 enum class State { INIT, READY, SHUTDOWN };
682 std::atomic<State> m_state;
683 std::unique_ptr<BufferBase> m_buffer_base;
684 FileWriter m_file_writer;
685 std::thread m_thread;
// The unique_ptr owns the logger; the atomic pointer gives logging call
// sites a lock-free view of it once initialize() publishes it.
688 std::unique_ptr<NanoLogger> nanologger;
689 std::atomic<NanoLogger*> atomic_nanologger;
// Hands a finished log line to the logger (presumably invoked by the
// logging macro). NOTE(review): operator== is an unconventional spelling
// for this, and the load is dereferenced without a null check -- logging
// before initialize() would crash; confirm that ordering is enforced.
691 bool VDebugServer::operator==(VDebug& logline)
693 atomic_nanologger.load(std::memory_order_acquire)->add(std::move(logline));
// Constructs the lossy (ring-buffer) logger and publishes it to the atomic
// pointer that logging call sites read.
697 void initialize(NonGuaranteedLogger ngl, std::string const& log_directory,
698 std::string const& log_file_name,
699 uint32_t log_file_roll_size_mb)
701 nanologger = std::make_unique<NanoLogger>(ngl, log_directory, log_file_name,
702 log_file_roll_size_mb);
703 atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
// Constructs the lossless (queue-buffer) logger and publishes it to the
// atomic pointer that logging call sites read.
706 void initialize(GuaranteedLogger gl, std::string const& log_directory,
707 std::string const& log_file_name,
708 uint32_t log_file_roll_size_mb)
710 nanologger = std::make_unique<NanoLogger>(gl, log_directory, log_file_name,
711 log_file_roll_size_mb);
712 atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
// Current minimum log level, stored as its underlying integer (0 = lowest,
// i.e. everything is logged by default).
715 std::atomic<unsigned int> loglevel = {0};
// Publishes the new minimum level; release pairs with the relaxed/acquire
// reads in is_logged on other threads.
717 void set_log_level(LogLevel level)
719 loglevel.store(static_cast<unsigned int>(level), std::memory_order_release);
// A message is emitted when its level is at or above the configured minimum.
// Relaxed load is fine: a slightly stale level only delays filtering.
722 bool is_logged(LogLevel level)
724 return static_cast<unsigned int>(level) >=
725 loglevel.load(std::memory_order_relaxed);