// lottie-player: src/vector/vdebug.cpp
#include "vdebug.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <fstream>
#include <memory>
#include <queue>
#include <sstream>
#include <thread>
#include <tuple>

namespace {

/* Returns microseconds since epoch */
uint64_t timestamp_now()
{
    return std::chrono::duration_cast<std::chrono::microseconds>(
               std::chrono::high_resolution_clock::now().time_since_epoch())
        .count();
}

/* Desired output format: [2016-10-13 00:01:23.528514] */
void format_timestamp(std::ostream& os, uint64_t timestamp)
{
    // The next 3 lines do not work on MSVC!
    // auto duration = std::chrono::microseconds(timestamp);
    // std::chrono::high_resolution_clock::time_point time_point(duration);
    // std::time_t time_t =
    // std::chrono::high_resolution_clock::to_time_t(time_point);
    std::time_t time_t = timestamp / 1000000;
    auto        gmtime = std::gmtime(&time_t);
    char        buffer[32];
    strftime(buffer, sizeof(buffer), "%Y-%m-%d %T.", gmtime);
    char microseconds[7];
    snprintf(microseconds, sizeof(microseconds), "%06llu",
             static_cast<unsigned long long>(timestamp % 1000000));
    os << '[' << buffer << microseconds << ']';
}

std::thread::id this_thread_id()
{
    static thread_local const std::thread::id id = std::this_thread::get_id();
    return id;
}

template <typename T, typename Tuple>
struct TupleIndex;

template <typename T, typename... Types>
struct TupleIndex<T, std::tuple<T, Types...> > {
    static constexpr const std::size_t value = 0;
};

template <typename T, typename U, typename... Types>
struct TupleIndex<T, std::tuple<U, Types...> > {
    static constexpr const std::size_t value =
        1 + TupleIndex<T, std::tuple<Types...> >::value;
};

}  // anonymous namespace

typedef std::tuple<char, uint32_t, uint64_t, int32_t, int64_t, double,
                   VDebug::string_literal_t, char*>
    SupportedTypes;
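
// Every value streamed into a VDebug line is encoded as a one-byte type id
// (the value's index in SupportedTypes) followed by the raw bytes of the
// value; stringify() later switches on that id to decode the arguments in
// order. For example, operator<<(double) writes the byte 0x05 followed by the
// raw bytes of the double.
//
// Illustrative compile-time check of that mapping (added for clarity; the
// logger itself does not depend on it):
static_assert(TupleIndex<char, SupportedTypes>::value == 0 &&
                  TupleIndex<double, SupportedTypes>::value == 5 &&
                  TupleIndex<char*, SupportedTypes>::value == 7,
              "type ids follow the order of types in SupportedTypes");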

char const* to_string(LogLevel loglevel)
{
    switch (loglevel) {
    case LogLevel::OFF:
        return "OFF";
    case LogLevel::INFO:
        return "INFO";
    case LogLevel::WARN:
        return "WARN";
    case LogLevel::CRIT:
        return "CRIT";
    }
    return "XXXX";
}

// Append the raw bytes of arg at the current write position.
template <typename Arg>
void VDebug::encode(Arg arg)
{
    *reinterpret_cast<Arg*>(buffer()) = arg;
    m_bytes_used += sizeof(Arg);
}

// Append the one-byte type id followed by the value itself.
template <typename Arg>
void VDebug::encode(Arg arg, uint8_t type_id)
{
    resize_buffer_if_needed(sizeof(Arg) + sizeof(uint8_t));
    encode<uint8_t>(type_id);
    encode<Arg>(arg);
}

VDebug::VDebug(LogLevel level, char const* file, char const* function,
               uint32_t line)
    : m_bytes_used(0), m_buffer_size(sizeof(m_stack_buffer))
{
    encode<uint64_t>(timestamp_now());
    encode<std::thread::id>(this_thread_id());
    encode<string_literal_t>(string_literal_t(file));
    encode<string_literal_t>(string_literal_t(function));
    encode<uint32_t>(line);
    encode<LogLevel>(level);
    // Emit the timestamp/thread/file header for anything other than plain
    // INFO lines.
    m_logAll = (level != LogLevel::INFO);
}

VDebug::~VDebug() = default;

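// The fixed header written by the VDebug constructor is decoded below in the
// same order it was encoded: timestamp, thread id, file, function, line and
// log level. When m_logAll is false (plain INFO lines) that header is omitted
// from the output and only the streamed arguments are printed.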
void VDebug::stringify(std::ostream& os)
{
    char*             b = !m_heap_buffer ? m_stack_buffer : m_heap_buffer.get();
    char const* const end = b + m_bytes_used;
    uint64_t          timestamp = *reinterpret_cast<uint64_t*>(b);
    b += sizeof(uint64_t);
    std::thread::id threadid = *reinterpret_cast<std::thread::id*>(b);
    b += sizeof(std::thread::id);
    string_literal_t file = *reinterpret_cast<string_literal_t*>(b);
    b += sizeof(string_literal_t);
    string_literal_t function = *reinterpret_cast<string_literal_t*>(b);
    b += sizeof(string_literal_t);
    uint32_t line = *reinterpret_cast<uint32_t*>(b);
    b += sizeof(uint32_t);
    LogLevel loglevel = *reinterpret_cast<LogLevel*>(b);
    b += sizeof(LogLevel);
    if (m_logAll) {
        format_timestamp(os, timestamp);

        os << '[' << to_string(loglevel) << ']' << '[' << threadid << ']' << '['
           << file.m_s << ':' << function.m_s << ':' << line << "] ";
    }

    stringify(os, b, end);
    os << std::endl;

    if (loglevel >= LogLevel::CRIT) os.flush();
}

template <typename Arg>
char* decode(std::ostream& os, char* b, Arg* dummy)
{
    Arg arg = *reinterpret_cast<Arg*>(b);
    os << arg;
    return b + sizeof(Arg);
}

template <>
char* decode(std::ostream& os, char* b, VDebug::string_literal_t* dummy)
{
    VDebug::string_literal_t s =
        *reinterpret_cast<VDebug::string_literal_t*>(b);
    os << s.m_s;
    return b + sizeof(VDebug::string_literal_t);
}

template <>
char* decode(std::ostream& os, char* b, char** dummy)
{
    // C strings are stored inline; print up to the terminating NUL and
    // return the position just past it.
    while (*b != '\0') {
        os << *b;
        ++b;
    }
    return ++b;
}

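// Recursive worker that prints the encoded arguments: read one type id,
// dispatch to the matching decode() instantiation (the case labels mirror the
// positions in SupportedTypes), then continue from the pointer decode()
// returns until start reaches end.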
void VDebug::stringify(std::ostream& os, char* start, char const* const end)
{
    if (start == end) return;

    int type_id = static_cast<int>(*start);
    start++;

    switch (type_id) {
    case 0:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<0, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 1:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<1, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 2:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<2, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 3:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<3, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 4:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<4, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 5:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<5, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 6:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<6, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    case 7:
        stringify(
            os,
            decode(os, start,
                   static_cast<std::tuple_element<7, SupportedTypes>::type*>(
                       nullptr)),
            end);
        return;
    }
}

char* VDebug::buffer()
{
    return !m_heap_buffer ? &m_stack_buffer[m_bytes_used]
                          : &(m_heap_buffer.get())[m_bytes_used];
}

void VDebug::resize_buffer_if_needed(size_t additional_bytes)
{
    size_t const required_size = m_bytes_used + additional_bytes;

    if (required_size <= m_buffer_size) return;

    if (!m_heap_buffer) {
        // First overflow: move from the stack buffer to a heap buffer of at
        // least 512 bytes.
        m_buffer_size = std::max(static_cast<size_t>(512), required_size);
        m_heap_buffer = std::make_unique<char[]>(m_buffer_size);
        memcpy(m_heap_buffer.get(), m_stack_buffer, m_bytes_used);
        return;
    } else {
        // Already on the heap: grow by doubling (or to the required size).
        m_buffer_size =
            std::max(static_cast<size_t>(2 * m_buffer_size), required_size);
        std::unique_ptr<char[]> new_heap_buffer(new char[m_buffer_size]);
        memcpy(new_heap_buffer.get(), m_heap_buffer.get(), m_bytes_used);
        m_heap_buffer.swap(new_heap_buffer);
    }
}

void VDebug::encode(char const* arg)
{
    if (arg != nullptr) encode_c_string(arg, strlen(arg));
}

void VDebug::encode(char* arg)
{
    if (arg != nullptr) encode_c_string(arg, strlen(arg));
}

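// C strings are copied into the buffer inline (type id byte, the characters,
// then the terminating NUL) rather than stored by pointer, so the text stays
// valid even after the caller's string is gone. Streaming "hi", for example,
// appends the bytes {0x07, 'h', 'i', '\0'}.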
void VDebug::encode_c_string(char const* arg, size_t length)
{
    if (length == 0) return;

    resize_buffer_if_needed(1 + length + 1);
    char* b = buffer();
    auto  type_id = TupleIndex<char*, SupportedTypes>::value;
    *reinterpret_cast<uint8_t*>(b++) = static_cast<uint8_t>(type_id);
    memcpy(b, arg, length + 1);
    m_bytes_used += 1 + length + 1;
}

void VDebug::encode(string_literal_t arg)
{
    encode<string_literal_t>(
        arg, TupleIndex<string_literal_t, SupportedTypes>::value);
}

VDebug& VDebug::operator<<(std::string const& arg)
{
    encode_c_string(arg.c_str(), arg.length());
    return *this;
}

VDebug& VDebug::operator<<(int32_t arg)
{
    encode<int32_t>(arg, TupleIndex<int32_t, SupportedTypes>::value);
    return *this;
}

VDebug& VDebug::operator<<(uint32_t arg)
{
    encode<uint32_t>(arg, TupleIndex<uint32_t, SupportedTypes>::value);
    return *this;
}

//    VDebug& VDebug::operator<<(int64_t arg)
//    {
//    encode < int64_t >(arg, TupleIndex < int64_t, SupportedTypes >::value);
//    return *this;
//    }

//    VDebug& VDebug::operator<<(uint64_t arg)
//    {
//    encode < uint64_t >(arg, TupleIndex < uint64_t, SupportedTypes >::value);
//    return *this;
//    }

VDebug& VDebug::operator<<(unsigned long arg)
{
    encode<uint64_t>(arg, TupleIndex<uint64_t, SupportedTypes>::value);
    return *this;
}

VDebug& VDebug::operator<<(long arg)
{
    encode<int64_t>(arg, TupleIndex<int64_t, SupportedTypes>::value);
    return *this;
}

VDebug& VDebug::operator<<(double arg)
{
    encode<double>(arg, TupleIndex<double, SupportedTypes>::value);
    return *this;
}

VDebug& VDebug::operator<<(char arg)
{
    encode<char>(arg, TupleIndex<char, SupportedTypes>::value);
    return *this;
}

struct BufferBase {
    virtual ~BufferBase() = default;
    virtual void push(VDebug&& logline) = 0;
    virtual bool try_pop(VDebug& logline) = 0;
};

struct SpinLock {
    SpinLock(std::atomic_flag& flag) : m_flag(flag)
    {
        while (m_flag.test_and_set(std::memory_order_acquire))
            ;
    }

    ~SpinLock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag& m_flag;
};
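
// SpinLock (above) is a minimal RAII guard over a std::atomic_flag: the
// constructor spins with test_and_set(acquire) until it owns the flag and the
// destructor clears it with release semantics, so a scope such as
//
//     { SpinLock lock(item.flag); /* exclusive access to item */ }
//
// holds the lock for exactly the lifetime of the guard.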

/* Multi Producer Single Consumer Ring Buffer */
class RingBuffer : public BufferBase {
public:
    struct alignas(64) Item {
        Item()
            : flag(), written(0), logline(LogLevel::INFO, nullptr, nullptr, 0)
        {
        }

        std::atomic_flag flag;
        char             written;
        char             padding[256 - sizeof(std::atomic_flag) -
                                  sizeof(char) - sizeof(VDebug)];
        VDebug           logline;
    };

    RingBuffer(size_t const size)
        : m_size(size),
          m_ring(static_cast<Item*>(std::malloc(size * sizeof(Item)))),
          m_write_index(0),
          m_read_index(0)
    {
        for (size_t i = 0; i < m_size; ++i) {
            new (&m_ring[i]) Item();
        }
        static_assert(sizeof(Item) == 256, "Unexpected size != 256");
    }

    ~RingBuffer() override
    {
        for (size_t i = 0; i < m_size; ++i) {
            m_ring[i].~Item();
        }
        std::free(m_ring);
    }

    void push(VDebug&& logline) override
    {
        unsigned int write_index =
            m_write_index.fetch_add(1, std::memory_order_relaxed) % m_size;
        Item&    item = m_ring[write_index];
        SpinLock spinlock(item.flag);
        item.logline = std::move(logline);
        item.written = 1;
    }

    bool try_pop(VDebug& logline) override
    {
        Item&    item = m_ring[m_read_index % m_size];
        SpinLock spinlock(item.flag);
        if (item.written == 1) {
            logline = std::move(item.logline);
            item.written = 0;
            ++m_read_index;
            return true;
        }
        return false;
    }

    RingBuffer(RingBuffer const&) = delete;
    RingBuffer& operator=(RingBuffer const&) = delete;

private:
    size_t const              m_size;
    Item*                     m_ring;
    std::atomic<unsigned int> m_write_index;
    char                      pad[64];
    unsigned int              m_read_index;
};
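
// The RingBuffer above backs the NonGuaranteedLogger: producers claim a slot
// with a relaxed fetch_add on m_write_index and copy their line in under that
// slot's spinlock, while the single consumer walks m_read_index and drains
// slots whose 'written' flag is set. If producers lap the consumer, an unread
// slot is simply overwritten, so lines can be dropped under heavy load; that
// is the speed/completeness trade-off of this buffer. The unused 'pad' member
// separates the producer-side write index from the consumer-side read index,
// presumably to avoid false sharing.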

class Buffer {
public:
    struct Item {
        Item(VDebug&& logline) : logline(std::move(logline)) {}
        char   padding[256 - sizeof(VDebug)];
        VDebug logline;
    };

    static constexpr const size_t size =
        32768;  // 8 MB per chunk (32768 * 256-byte items); helps reduce
                // memory fragmentation

    Buffer() : m_buffer(static_cast<Item*>(std::malloc(size * sizeof(Item))))
    {
        for (size_t i = 0; i <= size; ++i) {
            m_write_state[i].store(0, std::memory_order_relaxed);
        }
        static_assert(sizeof(Item) == 256, "Unexpected size != 256");
    }

    ~Buffer()
    {
        unsigned int write_count = m_write_state[size].load();
        for (size_t i = 0; i < write_count; ++i) {
            m_buffer[i].~Item();
        }
        std::free(m_buffer);
    }

    // Returns true if we need to switch to the next buffer
    bool push(VDebug&& logline, unsigned int const write_index)
    {
        new (&m_buffer[write_index]) Item(std::move(logline));
        m_write_state[write_index].store(1, std::memory_order_release);
        unsigned int const write_count =
            m_write_state[size].fetch_add(1, std::memory_order_acquire) + 1;
        return write_count == size;
    }

    bool try_pop(VDebug& logline, unsigned int const read_index)
    {
        if (m_write_state[read_index].load(std::memory_order_acquire)) {
            Item& item = m_buffer[read_index];
            logline = std::move(item.logline);
            return true;
        }
        return false;
    }

    Buffer(Buffer const&) = delete;
    Buffer& operator=(Buffer const&) = delete;

private:
    Item*                     m_buffer;
    std::atomic<unsigned int> m_write_state[size + 1];
};
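
// Buffer (above) is one fixed-size chunk used by the guaranteed (QueueBuffer)
// logger. Slot write_index holds a single line, m_write_state[write_index]
// flips to 1 once that line is fully constructed, and the extra element
// m_write_state[size] counts completed writes: push() returns true on the
// write that fills the chunk so the producer knows to set up the next one.
// That extra counter is also why the constructor initializes size + 1 flags.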

class QueueBuffer : public BufferBase {
public:
    QueueBuffer(QueueBuffer const&) = delete;
    QueueBuffer& operator=(QueueBuffer const&) = delete;

    QueueBuffer()
        : m_current_read_buffer{nullptr},
          m_write_index(0),
          m_flag(),
          m_read_index(0)
    {
        setup_next_write_buffer();
    }

    void push(VDebug&& logline) override
    {
        unsigned int write_index =
            m_write_index.fetch_add(1, std::memory_order_relaxed);
        if (write_index < Buffer::size) {
            if (m_current_write_buffer.load(std::memory_order_acquire)
                    ->push(std::move(logline), write_index)) {
                setup_next_write_buffer();
            }
        } else {
            // The current chunk is full; wait for another producer to install
            // a fresh one, then retry.
            while (m_write_index.load(std::memory_order_acquire) >=
                   Buffer::size)
                ;
            push(std::move(logline));
        }
    }

    bool try_pop(VDebug& logline) override
    {
        if (m_current_read_buffer == nullptr)
            m_current_read_buffer = get_next_read_buffer();

        Buffer* read_buffer = m_current_read_buffer;

        if (read_buffer == nullptr) return false;

        if (read_buffer->try_pop(logline, m_read_index)) {
            m_read_index++;
            if (m_read_index == Buffer::size) {
                // Finished draining this chunk; retire it and move on.
                m_read_index = 0;
                m_current_read_buffer = nullptr;
                SpinLock spinlock(m_flag);
                m_buffers.pop();
            }
            return true;
        }

        return false;
    }

private:
    void setup_next_write_buffer()
    {
        std::unique_ptr<Buffer> next_write_buffer(new Buffer());
        m_current_write_buffer.store(next_write_buffer.get(),
                                     std::memory_order_release);
        SpinLock spinlock(m_flag);
        m_buffers.push(std::move(next_write_buffer));
        m_write_index.store(0, std::memory_order_relaxed);
    }

    Buffer* get_next_read_buffer()
    {
        SpinLock spinlock(m_flag);
        return m_buffers.empty() ? nullptr : m_buffers.front().get();
    }

private:
    std::queue<std::unique_ptr<Buffer> > m_buffers;
    std::atomic<Buffer*>                 m_current_write_buffer;
    Buffer*                              m_current_read_buffer;
    std::atomic<unsigned int>            m_write_index;
    std::atomic_flag                     m_flag;
    unsigned int                         m_read_index;
};
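
// QueueBuffer (above) backs the GuaranteedLogger: instead of overwriting old
// entries it keeps a std::queue of Buffer chunks, so nothing is dropped at
// the cost of allocating a new chunk whenever the current one fills up.
// Producers share the current write chunk; the consumer drains chunks in
// FIFO order and releases each one once it has been read completely.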

class FileWriter {
public:
    FileWriter(std::string const& log_directory,
               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
        : m_log_file_roll_size_bytes(log_file_roll_size_mb * 1024 * 1024),
          m_name(log_directory + log_file_name)
    {
        roll_file();
    }

    void write(VDebug& logline)
    {
        auto pos = m_os->tellp();
        logline.stringify(*m_os);
        m_bytes_written += m_os->tellp() - pos;
        if (m_bytes_written > m_log_file_roll_size_bytes) {
            roll_file();
        }
    }

private:
    void roll_file()
    {
        if (m_os) {
            m_os->flush();
            m_os->close();
        }

        m_bytes_written = 0;
        m_os = std::make_unique<std::ofstream>();
        // TODO: Optimize this part. Does it even matter?
        std::string log_file_name = m_name;
        log_file_name.append(".");
        log_file_name.append(std::to_string(++m_file_number));
        log_file_name.append(".txt");
        m_os->open(log_file_name, std::ofstream::out | std::ofstream::trunc);
    }

private:
    uint32_t                       m_file_number = 0;
    std::streamoff                 m_bytes_written = 0;
    uint32_t const                 m_log_file_roll_size_bytes;
    std::string const              m_name;
    std::unique_ptr<std::ofstream> m_os;
};
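
// FileWriter (above) appends each popped line to the current log file and
// rolls over to a new file named <m_name>.<n>.txt once the configured size
// limit is exceeded, so a long-running process never grows a single
// unbounded file.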

class NanoLogger {
public:
    NanoLogger(NonGuaranteedLogger ngl, std::string const& log_directory,
               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
        : m_state(State::INIT),
          m_buffer_base(
              new RingBuffer(std::max(1u, ngl.ring_buffer_size_mb) * 1024 * 4)),
          m_file_writer(log_directory, log_file_name,
                        std::max(1u, log_file_roll_size_mb)),
          m_thread(&NanoLogger::pop, this)
    {
        m_state.store(State::READY, std::memory_order_release);
    }

    NanoLogger(GuaranteedLogger gl, std::string const& log_directory,
               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
        : m_state(State::INIT),
          m_buffer_base(new QueueBuffer()),
          m_file_writer(log_directory, log_file_name,
                        std::max(1u, log_file_roll_size_mb)),
          m_thread(&NanoLogger::pop, this)
    {
        m_state.store(State::READY, std::memory_order_release);
    }

    ~NanoLogger()
    {
        m_state.store(State::SHUTDOWN);
        m_thread.join();
    }

    void add(VDebug&& logline) { m_buffer_base->push(std::move(logline)); }

    void pop()
    {
        // Wait for the constructor to complete and pull all stores done there
        // to this thread / core.
        while (m_state.load(std::memory_order_acquire) == State::INIT)
            std::this_thread::sleep_for(std::chrono::microseconds(50));

        VDebug logline(LogLevel::INFO, nullptr, nullptr, 0);

        while (m_state.load() == State::READY) {
            if (m_buffer_base->try_pop(logline))
                m_file_writer.write(logline);
            else
                std::this_thread::sleep_for(std::chrono::microseconds(50));
        }

        // Pop and log all remaining entries
        while (m_buffer_base->try_pop(logline)) {
            m_file_writer.write(logline);
        }
    }

private:
    enum class State { INIT, READY, SHUTDOWN };

    std::atomic<State>          m_state;
    std::unique_ptr<BufferBase> m_buffer_base;
    FileWriter                  m_file_writer;
    std::thread                 m_thread;
};
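
// NanoLogger (above) ties the pieces together: it owns the chosen buffer
// (RingBuffer or QueueBuffer), the FileWriter and the single consumer thread
// running pop(). The INIT/READY handshake keeps the worker from touching the
// members before construction finishes; on destruction the state flips to
// SHUTDOWN, the worker drains whatever is still buffered and the thread is
// joined.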

std::unique_ptr<NanoLogger> nanologger;
std::atomic<NanoLogger*>    atomic_nanologger;

bool VDebugServer::operator==(VDebug& logline)
{
    atomic_nanologger.load(std::memory_order_acquire)->add(std::move(logline));
    return true;
}

void initialize(NonGuaranteedLogger ngl, std::string const& log_directory,
                std::string const& log_file_name,
                uint32_t           log_file_roll_size_mb)
{
    nanologger = std::make_unique<NanoLogger>(ngl, log_directory, log_file_name,
                                              log_file_roll_size_mb);
    atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
}

void initialize(GuaranteedLogger gl, std::string const& log_directory,
                std::string const& log_file_name,
                uint32_t           log_file_roll_size_mb)
{
    nanologger = std::make_unique<NanoLogger>(gl, log_directory, log_file_name,
                                              log_file_roll_size_mb);
    atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
}

std::atomic<unsigned int> loglevel = {0};

void set_log_level(LogLevel level)
{
    loglevel.store(static_cast<unsigned int>(level), std::memory_order_release);
}

bool is_logged(LogLevel level)
{
    return static_cast<unsigned int>(level) >=
           loglevel.load(std::memory_order_relaxed);
}
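
// Typical usage, as a sketch based only on this file (the convenience macros
// that expand to "VDebugServer() == VDebug(level, __FILE__, __func__,
// __LINE__)" live in vdebug.h and are assumed here):
//
//     initialize(GuaranteedLogger(), "/tmp/", "vdebug", 1);  // roll at 1 MB
//     set_log_level(LogLevel::INFO);
//     if (is_logged(LogLevel::INFO))
//         VDebugServer() == VDebug(LogLevel::INFO, __FILE__, __func__,
//                                  __LINE__)
//                               << "frame " << 42 << " rendered";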