lottie/vector: move pixman code to separate pixman folder.
[platform/core/uifw/lottie-player.git] / src / vector / vdebug.cpp
1 /*
2  * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
3  *
4  * Licensed under the Flora License, Version 1.1 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     http://floralicense.org/license/
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "vdebug.h"
18 #include <atomic>
19 #include <chrono>
20 #include <cstring>
21 #include <ctime>
22 #include <fstream>
23 #include <memory>
24 #include <queue>
25 #include <sstream>
26 #include <thread>
27 #include <tuple>
28
29 namespace {
30
/* Returns the number of microseconds elapsed since the Unix epoch. */
uint64_t timestamp_now()
{
    using namespace std::chrono;
    auto since_epoch = high_resolution_clock::now().time_since_epoch();
    return duration_cast<microseconds>(since_epoch).count();
}
38
/* Writes `timestamp` (microseconds since epoch) to `os` in the form
 * [2016-10-13 00:01:23.528514].  All times are rendered in UTC. */
void format_timestamp(std::ostream& os, uint64_t timestamp)
{
    // std::chrono::high_resolution_clock::to_time_t is unavailable on MSVC,
    // so split seconds / microseconds manually instead of going through a
    // time_point.
    std::time_t time_t = timestamp / 1000000;
    auto        gmtime = std::gmtime(&time_t);
    // Fallback text in case gmtime ever returns nullptr (out-of-range time).
    char buffer[32] = "1970-01-01 00:00:00.";
    if (gmtime != nullptr) {
        strftime(buffer, sizeof(buffer), "%Y-%m-%d %T.", gmtime);
    }
    char microseconds[7];
    snprintf(microseconds, sizeof(microseconds), "%06llu",
             (long long unsigned int)timestamp % 1000000);
    os << '[' << buffer << microseconds << ']';
}
55
/* Returns the calling thread's id, cached per thread so repeated log
 * calls don't re-query it. */
std::thread::id this_thread_id()
{
    thread_local std::thread::id const cached_id = std::this_thread::get_id();
    return cached_id;
}
61
// Compile-time index of type T inside a std::tuple type list, e.g.
// TupleIndex<T, std::tuple<A, B, T>>::value == 2.
template <typename T, typename Tuple>
struct TupleIndex;

// Base case: T is the head of the tuple -> index 0.
template <typename T, typename... Rest>
struct TupleIndex<T, std::tuple<T, Rest...> > {
    static constexpr const std::size_t value = 0;
};

// Recursive case: skip the head and add one to the index of the tail.
template <typename T, typename Head, typename... Rest>
struct TupleIndex<T, std::tuple<Head, Rest...> > {
    static constexpr const std::size_t value =
        1 + TupleIndex<T, std::tuple<Rest...> >::value;
};
75
76 }  // anonymous namespace
77
78 typedef std::tuple<char, uint32_t, uint64_t, int32_t, int64_t, double,
79                    VDebug::string_literal_t, char*>
80     SupportedTypes;
81
82 char const* to_string(LogLevel loglevel)
83 {
84     switch (loglevel) {
85     case LogLevel::OFF:
86         return "OFF";
87     case LogLevel::INFO:
88         return "INFO";
89     case LogLevel::WARN:
90         return "WARN";
91     case LogLevel::CRIT:
92         return "CRIT";
93     }
94     return "XXXX";
95 }
96
97 template <typename Arg>
98 void VDebug::encode(Arg arg)
99 {
100     *reinterpret_cast<Arg*>(buffer()) = arg;
101     m_bytes_used += sizeof(Arg);
102 }
103
104 template <typename Arg>
105 void VDebug::encode(Arg arg, uint8_t type_id)
106 {
107     resize_buffer_if_needed(sizeof(Arg) + sizeof(uint8_t));
108     encode<uint8_t>(type_id);
109     encode<Arg>(arg);
110 }
111
112 VDebug::VDebug(LogLevel level, char const* file, char const* function,
113                uint32_t line)
114     : m_bytes_used(0), m_buffer_size(sizeof(m_stack_buffer))
115 {
116     encode<uint64_t>(timestamp_now());
117     encode<std::thread::id>(this_thread_id());
118     encode<string_literal_t>(string_literal_t(file));
119     encode<string_literal_t>(string_literal_t(function));
120     encode<uint32_t>(line);
121     encode<LogLevel>(level);
122     if (level == LogLevel::INFO) {
123         m_logAll = false;
124     } else {
125         m_logAll = true;
126     }
127 }
128
129 VDebug::~VDebug() = default;
130
/* Decodes this record — fixed header first, then the variable argument
 * list — and writes the formatted log line to os.  The header is read in
 * exactly the order the constructor encoded it. */
void VDebug::stringify(std::ostream& os)
{
    // Records start in the stack buffer and move to the heap on overflow.
    char*             b = !m_heap_buffer ? m_stack_buffer : m_heap_buffer.get();
    char const* const end = b + m_bytes_used;
    uint64_t          timestamp = *reinterpret_cast<uint64_t*>(b);
    b += sizeof(uint64_t);
    std::thread::id threadid = *reinterpret_cast<std::thread::id*>(b);
    b += sizeof(std::thread::id);
    string_literal_t file = *reinterpret_cast<string_literal_t*>(b);
    b += sizeof(string_literal_t);
    string_literal_t function = *reinterpret_cast<string_literal_t*>(b);
    b += sizeof(string_literal_t);
    uint32_t line = *reinterpret_cast<uint32_t*>(b);
    b += sizeof(uint32_t);
    LogLevel loglevel = *reinterpret_cast<LogLevel*>(b);
    b += sizeof(LogLevel);
    // m_logAll is false only for INFO records (set in the constructor), so
    // INFO lines are emitted without the context prefix.
    if (m_logAll) {
        format_timestamp(os, timestamp);

        os << '[' << to_string(loglevel) << ']' << '[' << threadid << ']' << '['
           << file.m_s << ':' << function.m_s << ':' << line << "] ";
    }

    // Decode the tagged argument payload that follows the header.
    stringify(os, b, end);
    os << std::endl;

    // Critical lines are flushed to the file immediately.
    if (loglevel >= LogLevel::CRIT) os.flush();
}
159
/* Default decoder: reads one trivially-copyable Arg from the byte stream,
 * streams it to os, and returns the advanced read pointer.  memcpy avoids
 * the misaligned-read undefined behavior of dereferencing a
 * reinterpret_cast'ed pointer into the packed log buffer. */
template <typename Arg>
char* decode(std::ostream& os, char* b, Arg* dummy)
{
    Arg arg;
    std::memcpy(&arg, b, sizeof(Arg));
    os << arg;
    return b + sizeof(Arg);
}
167
/* Decoder specialization for string literals: the buffer stores only the
 * pointer (the pointee is presumably a compile-time literal that outlives
 * the record — TODO confirm against callers), so stream the characters it
 * points to. */
template <>
char* decode(std::ostream& os, char* b, VDebug::string_literal_t* dummy)
{
    VDebug::string_literal_t s =
        *reinterpret_cast<VDebug::string_literal_t*>(b);
    os << s.m_s;
    return b + sizeof(VDebug::string_literal_t);
}
176
177 template <>
178 char* decode(std::ostream& os, char* b, char** dummy)
179 {
180     while (*b != '\0') {
181         os << *b;
182         ++b;
183     }
184     return ++b;
185 }
186
187 void VDebug::stringify(std::ostream& os, char* start, char const* const end)
188 {
189     if (start == end) return;
190
191     int type_id = static_cast<int>(*start);
192     start++;
193
194     switch (type_id) {
195     case 0:
196         stringify(
197             os,
198             decode(os, start,
199                    static_cast<std::tuple_element<0, SupportedTypes>::type*>(
200                        nullptr)),
201             end);
202         return;
203     case 1:
204         stringify(
205             os,
206             decode(os, start,
207                    static_cast<std::tuple_element<1, SupportedTypes>::type*>(
208                        nullptr)),
209             end);
210         return;
211     case 2:
212         stringify(
213             os,
214             decode(os, start,
215                    static_cast<std::tuple_element<2, SupportedTypes>::type*>(
216                        nullptr)),
217             end);
218         return;
219     case 3:
220         stringify(
221             os,
222             decode(os, start,
223                    static_cast<std::tuple_element<3, SupportedTypes>::type*>(
224                        nullptr)),
225             end);
226         return;
227     case 4:
228         stringify(
229             os,
230             decode(os, start,
231                    static_cast<std::tuple_element<4, SupportedTypes>::type*>(
232                        nullptr)),
233             end);
234         return;
235     case 5:
236         stringify(
237             os,
238             decode(os, start,
239                    static_cast<std::tuple_element<5, SupportedTypes>::type*>(
240                        nullptr)),
241             end);
242         return;
243     case 6:
244         stringify(
245             os,
246             decode(os, start,
247                    static_cast<std::tuple_element<6, SupportedTypes>::type*>(
248                        nullptr)),
249             end);
250         return;
251     case 7:
252         stringify(
253             os,
254             decode(os, start,
255                    static_cast<std::tuple_element<7, SupportedTypes>::type*>(
256                        nullptr)),
257             end);
258         return;
259     }
260 }
261
262 char* VDebug::buffer()
263 {
264     return !m_heap_buffer ? &m_stack_buffer[m_bytes_used]
265                           : &(m_heap_buffer.get())[m_bytes_used];
266 }
267
268 void VDebug::resize_buffer_if_needed(size_t additional_bytes)
269 {
270     size_t const required_size = m_bytes_used + additional_bytes;
271
272     if (required_size <= m_buffer_size) return;
273
274     if (!m_heap_buffer) {
275         m_buffer_size = std::max(static_cast<size_t>(512), required_size);
276         m_heap_buffer = std::make_unique<char[]>(m_buffer_size);
277         memcpy(m_heap_buffer.get(), m_stack_buffer, m_bytes_used);
278         return;
279     } else {
280         m_buffer_size =
281             std::max(static_cast<size_t>(2 * m_buffer_size), required_size);
282         std::unique_ptr<char[]> new_heap_buffer(new char[m_buffer_size]);
283         memcpy(new_heap_buffer.get(), m_heap_buffer.get(), m_bytes_used);
284         m_heap_buffer.swap(new_heap_buffer);
285     }
286 }
287
288 void VDebug::encode(char const* arg)
289 {
290     if (arg != nullptr) encode_c_string(arg, strlen(arg));
291 }
292
293 void VDebug::encode(char* arg)
294 {
295     if (arg != nullptr) encode_c_string(arg, strlen(arg));
296 }
297
/* Copies a string of `length` characters into the record: one type_id tag
 * byte, then the characters plus their NUL terminator.  Empty strings are
 * not encoded at all. */
void VDebug::encode_c_string(char const* arg, size_t length)
{
    if (length == 0) return;

    // tag byte + characters + NUL terminator
    resize_buffer_if_needed(1 + length + 1);
    char* b = buffer();
    auto  type_id = TupleIndex<char*, SupportedTypes>::value;
    *reinterpret_cast<uint8_t*>(b++) = static_cast<uint8_t>(type_id);
    // length + 1 also copies the terminator the decoder relies on.
    memcpy(b, arg, length + 1);
    m_bytes_used += 1 + length + 1;
}
309
310 void VDebug::encode(string_literal_t arg)
311 {
312     encode<string_literal_t>(
313         arg, TupleIndex<string_literal_t, SupportedTypes>::value);
314 }
315
316 VDebug& VDebug::operator<<(std::string const& arg)
317 {
318     encode_c_string(arg.c_str(), arg.length());
319     return *this;
320 }
321
322 VDebug& VDebug::operator<<(int32_t arg)
323 {
324     encode<int32_t>(arg, TupleIndex<int32_t, SupportedTypes>::value);
325     return *this;
326 }
327
328 VDebug& VDebug::operator<<(uint32_t arg)
329 {
330     encode<uint32_t>(arg, TupleIndex<uint32_t, SupportedTypes>::value);
331     return *this;
332 }
333
334 //    VDebug& VDebug::operator<<(int64_t arg)
335 //    {
336 //    encode < int64_t >(arg, TupleIndex < int64_t, SupportedTypes >::value);
337 //    return *this;
338 //    }
339
340 //    VDebug& VDebug::operator<<(uint64_t arg)
341 //    {
342 //    encode < uint64_t >(arg, TupleIndex < uint64_t, SupportedTypes >::value);
343 //    return *this;
344 //    }
345 VDebug& VDebug::operator<<(unsigned long arg)
346 {
347     encode<uint64_t>(arg, TupleIndex<uint64_t, SupportedTypes>::value);
348     return *this;
349 }
350
351 VDebug& VDebug::operator<<(long arg)
352 {
353     encode<int64_t>(arg, TupleIndex<int64_t, SupportedTypes>::value);
354     return *this;
355 }
356
357 VDebug& VDebug::operator<<(double arg)
358 {
359     encode<double>(arg, TupleIndex<double, SupportedTypes>::value);
360     return *this;
361 }
362
363 VDebug& VDebug::operator<<(char arg)
364 {
365     encode<char>(arg, TupleIndex<char, SupportedTypes>::value);
366     return *this;
367 }
368
/* Interface shared by the two consumer-side buffers: RingBuffer (used by
 * the non-guaranteed logger) and QueueBuffer (used by the guaranteed one). */
struct BufferBase {
    virtual ~BufferBase() = default;
    // Enqueue one finished log line (called from producer threads).
    virtual void push(VDebug&& logline) = 0;
    // Move the next available line into logline; false when none is ready.
    virtual bool try_pop(VDebug& logline) = 0;
};
374
/* RAII busy-wait lock over a std::atomic_flag: the constructor spins until
 * it acquires the flag, the destructor releases it. */
struct SpinLock {
    SpinLock(std::atomic_flag& flag) : m_flag(flag)
    {
        while (m_flag.test_and_set(std::memory_order_acquire)) {
            // spin until the previous holder clears the flag
        }
    }

    ~SpinLock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag& m_flag;
};
387
/* Multi Producer Single Consumer Ring Buffer */
class RingBuffer : public BufferBase {
public:
    // One slot per log line, padded so sizeof(Item) == 256 (enforced by the
    // static_assert in the constructor).
    struct alignas(64) Item {
        Item()
            : flag(), written(0), logline(LogLevel::INFO, nullptr, nullptr, 0)
        {
        }

        std::atomic_flag flag;     // per-slot spin lock
        char             written;  // 1 while logline holds an unread entry
        char             padding[256 - sizeof(std::atomic_flag) - sizeof(char) -
                     sizeof(VDebug)];
        VDebug           logline;
    };

    // Allocates all `size` slots up front and constructs them in place.
    RingBuffer(size_t const size)
        : m_size(size),
          m_ring(static_cast<Item*>(std::malloc(size * sizeof(Item)))),
          m_write_index(0),
          m_read_index(0)
    {
        for (size_t i = 0; i < m_size; ++i) {
            new (&m_ring[i]) Item();
        }
        static_assert(sizeof(Item) == 256, "Unexpected size != 256");
    }

    ~RingBuffer() override
    {
        for (size_t i = 0; i < m_size; ++i) {
            m_ring[i].~Item();
        }
        std::free(m_ring);
    }

    // Producers claim slots with an atomic fetch_add.  Note: the slot is
    // overwritten regardless of `written`, so if producers lap the consumer,
    // unread entries are lost — this is the "non-guaranteed" mode.
    void push(VDebug&& logline) override
    {
        unsigned int write_index =
            m_write_index.fetch_add(1, std::memory_order_relaxed) % m_size;
        Item&    item = m_ring[write_index];
        SpinLock spinlock(item.flag);
        item.logline = std::move(logline);
        item.written = 1;
    }

    // Single consumer: reads slots in claim order; false when the next slot
    // has not been written yet.
    bool try_pop(VDebug& logline) override
    {
        Item&    item = m_ring[m_read_index % m_size];
        SpinLock spinlock(item.flag);
        if (item.written == 1) {
            logline = std::move(item.logline);
            item.written = 0;
            ++m_read_index;
            return true;
        }
        return false;
    }

    RingBuffer(RingBuffer const&) = delete;
    RingBuffer& operator=(RingBuffer const&) = delete;

private:
    size_t const              m_size;
    Item*                     m_ring;
    std::atomic<unsigned int> m_write_index;
    char                      pad[64];  // keeps m_read_index off the producers' cache line
    unsigned int              m_read_index;  // consumer-only, so no atomic needed
};
457
/* One fixed-size chunk of the guaranteed-logger queue.  Producers each
 * write one slot exactly once; m_write_state tracks per-slot readiness,
 * and its extra trailing element counts completed writes. */
class Buffer {
public:
    struct Item {
        Item(VDebug&& logline) : logline(std::move(logline)) {}
        char   padding[256 - sizeof(VDebug)];  // pad Item to exactly 256 bytes
        VDebug logline;
    };

    static constexpr const size_t size =
        32768;  // 8MB. Helps reduce memory fragmentation

    // Storage is raw malloc'ed; Items are placement-new'ed on first push.
    Buffer() : m_buffer(static_cast<Item*>(std::malloc(size * sizeof(Item))))
    {
        // <= size on purpose: also clears the trailing write-counter slot.
        for (size_t i = 0; i <= size; ++i) {
            m_write_state[i].store(0, std::memory_order_relaxed);
        }
        static_assert(sizeof(Item) == 256, "Unexpected size != 256");
    }

    ~Buffer()
    {
        // Only the first write_count slots hold constructed Items.
        unsigned int write_count = m_write_state[size].load();
        for (size_t i = 0; i < write_count; ++i) {
            m_buffer[i].~Item();
        }
        std::free(m_buffer);
    }

    // Returns true if we need to switch to next buffer
    bool push(VDebug&& logline, unsigned int const write_index)
    {
        new (&m_buffer[write_index]) Item(std::move(logline));
        // Release pairs with the acquire load in try_pop below.
        m_write_state[write_index].store(1, std::memory_order_release);
        // The producer that completes the final slot tells its caller the
        // chunk is full.
        return m_write_state[size].fetch_add(1, std::memory_order_acquire) +
                   1 ==
               size;
    }

    bool try_pop(VDebug& logline, unsigned int const read_index)
    {
        // Acquire ensures the Item contents are visible before moving.
        if (m_write_state[read_index].load(std::memory_order_acquire)) {
            Item& item = m_buffer[read_index];
            logline = std::move(item.logline);
            return true;
        }
        return false;
    }

    Buffer(Buffer const&) = delete;
    Buffer& operator=(Buffer const&) = delete;

private:
    Item*                     m_buffer;
    std::atomic<unsigned int> m_write_state[size + 1];
};
513
/* Unbounded buffer for the guaranteed logger: a FIFO of fixed-size Buffer
 * chunks.  Producers fill the current write chunk; the single consumer
 * drains chunks in order and discards each once fully read. */
class QueueBuffer : public BufferBase {
public:
    QueueBuffer(QueueBuffer const&) = delete;
    QueueBuffer& operator=(QueueBuffer const&) = delete;

    QueueBuffer()
        : m_current_read_buffer{nullptr},
          m_write_index(0),
          m_flag(),
          m_read_index(0)
    {
        setup_next_write_buffer();
    }

    void push(VDebug&& logline) override
    {
        // Claim a slot; the producer that fills the chunk's last slot
        // installs the next chunk.
        unsigned int write_index =
            m_write_index.fetch_add(1, std::memory_order_relaxed);
        if (write_index < Buffer::size) {
            if (m_current_write_buffer.load(std::memory_order_acquire)
                    ->push(std::move(logline), write_index)) {
                setup_next_write_buffer();
            }
        } else {
            // Chunk already full: spin until the new chunk resets
            // m_write_index, then retry.
            while (m_write_index.load(std::memory_order_acquire) >=
                   Buffer::size)
                ;
            push(std::move(logline));
        }
    }

    bool try_pop(VDebug& logline) override
    {
        if (m_current_read_buffer == nullptr)
            m_current_read_buffer = get_next_read_buffer();

        Buffer* read_buffer = m_current_read_buffer;

        if (read_buffer == nullptr) return false;  // nothing queued yet

        if (read_buffer->try_pop(logline, m_read_index)) {
            m_read_index++;
            if (m_read_index == Buffer::size) {
                // Chunk fully consumed: drop it and advance to the next.
                m_read_index = 0;
                m_current_read_buffer = nullptr;
                SpinLock spinlock(m_flag);
                m_buffers.pop();
            }
            return true;
        }

        return false;
    }

private:
    // Allocates a fresh chunk, publishes it to producers, then resets the
    // producer slot counter.  m_flag guards the chunk queue.
    void setup_next_write_buffer()
    {
        std::unique_ptr<Buffer> next_write_buffer(new Buffer());
        m_current_write_buffer.store(next_write_buffer.get(),
                                     std::memory_order_release);
        SpinLock spinlock(m_flag);
        m_buffers.push(std::move(next_write_buffer));
        m_write_index.store(0, std::memory_order_relaxed);
    }

    Buffer* get_next_read_buffer()
    {
        SpinLock spinlock(m_flag);
        return m_buffers.empty() ? nullptr : m_buffers.front().get();
    }

private:
    std::queue<std::unique_ptr<Buffer> > m_buffers;  // chunk FIFO, guarded by m_flag
    std::atomic<Buffer*>                 m_current_write_buffer;
    Buffer*                              m_current_read_buffer;  // consumer-only cache
    std::atomic<unsigned int>            m_write_index;  // next free slot in write chunk
    std::atomic_flag                     m_flag;
    unsigned int                         m_read_index;  // consumer-only
};
593
/* Writes stringified log lines to rolling files named
 * "<dir><name>.<n>.txt", starting a new file whenever the current one
 * exceeds the configured roll size. */
class FileWriter {
public:
    FileWriter(std::string const& log_directory,
               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
        : m_log_file_roll_size_bytes(log_file_roll_size_mb * 1024 * 1024),
          m_name(log_directory + log_file_name)
    {
        roll_file();  // opens the first file (".1.txt")
    }

    // Appends one formatted line; rolls once the size cap is exceeded.
    void write(VDebug& logline)
    {
        auto pos = m_os->tellp();
        logline.stringify(*m_os);
        m_bytes_written += m_os->tellp() - pos;
        if (m_bytes_written > m_log_file_roll_size_bytes) {
            roll_file();
        }
    }

private:
    // Flushes/closes the current file (if any) and truncates/opens
    // "<m_name>.<next number>.txt" as the new target.
    void roll_file()
    {
        if (m_os) {
            m_os->flush();
            m_os->close();
        }

        m_bytes_written = 0;
        m_os = std::make_unique<std::ofstream>();
        // TODO Optimize this part. Does it even matter ?
        std::string log_file_name = m_name;
        log_file_name.append(".");
        log_file_name.append(std::to_string(++m_file_number));
        log_file_name.append(".txt");
        m_os->open(log_file_name, std::ofstream::out | std::ofstream::trunc);
    }

private:
    uint32_t                       m_file_number = 0;    // suffix of the current file
    std::streamoff                 m_bytes_written = 0;  // bytes in the current file
    uint32_t const                 m_log_file_roll_size_bytes;
    std::string const              m_name;  // directory + base file name
    std::unique_ptr<std::ofstream> m_os;    // current output file
};
639
/* Owns the producer/consumer buffer, the file writer, and the background
 * thread that drains one into the other. */
class NanoLogger {
public:
    // Non-guaranteed mode: bounded RingBuffer, so entries can be
    // overwritten under load.  Slot count = MB * 1024 * 4 (256-byte slots).
    NanoLogger(NonGuaranteedLogger ngl, std::string const& log_directory,
               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
        : m_state(State::INIT),
          m_buffer_base(
              new RingBuffer(std::max(1u, ngl.ring_buffer_size_mb) * 1024 * 4)),
          m_file_writer(log_directory, log_file_name,
                        std::max(1u, log_file_roll_size_mb)),
          m_thread(&NanoLogger::pop, this)
    {
        m_state.store(State::READY, std::memory_order_release);
    }

    // Guaranteed mode: unbounded QueueBuffer, no entry is overwritten.
    NanoLogger(GuaranteedLogger gl, std::string const& log_directory,
               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
        : m_state(State::INIT),
          m_buffer_base(new QueueBuffer()),
          m_file_writer(log_directory, log_file_name,
                        std::max(1u, log_file_roll_size_mb)),
          m_thread(&NanoLogger::pop, this)
    {
        m_state.store(State::READY, std::memory_order_release);
    }

    ~NanoLogger()
    {
        m_state.store(State::SHUTDOWN);
        m_thread.join();  // pop() drains remaining entries before returning
    }

    // Producer entry point: hand one finished line to the buffer.
    void add(VDebug&& logline) { m_buffer_base->push(std::move(logline)); }

    // Consumer loop executed by m_thread.
    void pop()
    {
        // Wait for constructor to complete and pull all stores done there to
        // this thread / core.
        while (m_state.load(std::memory_order_acquire) == State::INIT)
            std::this_thread::sleep_for(std::chrono::microseconds(50));

        // Reusable line object; try_pop move-assigns into it.
        VDebug logline(LogLevel::INFO, nullptr, nullptr, 0);

        while (m_state.load() == State::READY) {
            if (m_buffer_base->try_pop(logline))
                m_file_writer.write(logline);
            else
                std::this_thread::sleep_for(std::chrono::microseconds(50));
        }

        // Pop and log all remaining entries
        while (m_buffer_base->try_pop(logline)) {
            m_file_writer.write(logline);
        }
    }

private:
    enum class State { INIT, READY, SHUTDOWN };

    std::atomic<State>          m_state;
    std::unique_ptr<BufferBase> m_buffer_base;
    FileWriter                  m_file_writer;
    std::thread                 m_thread;
};
703
// Owner of the process-wide logger plus an atomic alias used for lock-free
// access from the logging hot path (see VDebugServer::operator==).
std::unique_ptr<NanoLogger> nanologger;
std::atomic<NanoLogger*>    atomic_nanologger;
706
/* Hands a finished log line to the background logger.  Presumably spelled
 * operator== so a logging macro can chain it after building the line —
 * verify against the header.
 * NOTE(review): assumes initialize() was called first; the loaded pointer
 * is dereferenced unchecked and would crash on a null logger. */
bool VDebugServer::operator==(VDebug& logline)
{
    atomic_nanologger.load(std::memory_order_acquire)->add(std::move(logline));
    return true;
}
712
/* Installs a non-guaranteed (ring-buffer) logger as the process-wide
 * singleton.  Must run before any logging call dereferences the logger. */
void initialize(NonGuaranteedLogger ngl, std::string const& log_directory,
                std::string const& log_file_name,
                uint32_t           log_file_roll_size_mb)
{
    nanologger = std::make_unique<NanoLogger>(ngl, log_directory, log_file_name,
                                    log_file_roll_size_mb);
    // Publish the raw pointer for lock-free access on the hot path.
    atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
}
721
/* Installs a guaranteed (unbounded queue) logger as the process-wide
 * singleton.  Must run before any logging call dereferences the logger. */
void initialize(GuaranteedLogger gl, std::string const& log_directory,
                std::string const& log_file_name,
                uint32_t           log_file_roll_size_mb)
{
    nanologger = std::make_unique<NanoLogger>(gl, log_directory, log_file_name,
                                    log_file_roll_size_mb);
    // Publish the raw pointer for lock-free access on the hot path.
    atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
}
730
// Global minimum-level threshold (LogLevel as its underlying integer),
// consulted by is_logged().
std::atomic<unsigned int> loglevel = {0};

/* Sets the global logging threshold. */
void set_log_level(LogLevel level)
{
    loglevel.store(static_cast<unsigned int>(level), std::memory_order_release);
}

/* Returns true when a message at `level` passes the current threshold. */
bool is_logged(LogLevel level)
{
    return static_cast<unsigned int>(level) >=
           loglevel.load(std::memory_order_relaxed);
}