3 * Copyright (c) 2019-2020 Samsung Electronics Co., Ltd
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is furnished
10 * to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 #include <dlogutil-internal.h>
25 #include <queued_entry_timestamp.h>
28 #include "fdi_logger.h"
29 #include "fdi_zero_copy.h"
30 #include "logretrieve.h"
/* Pick the fd_ops vtable that services buffer `buf_id`, or NULL when the
 * buffer is disabled (or the backend is unknown).
 * NOTE(review): several lines of this function (opening brace, the bodies
 * of the two `if`s, the `switch (backend)` header and closing braces) are
 * missing from this view; comments describe only what is visible. */
36 static const struct fd_ops *get_ops_by_buffer(size_t buf_id, int enabled_buffers, backend_t backend, bool is_compressed_memory)
/* Buffers absent from the `enabled_buffers` bitmask get no ops at all. */
38 if (!bit_test(enabled_buffers, buf_id))
41 /* These are implemented by the pipe daemon,
42 * regardless of the nominal backend. */
43 if (is_compressed_memory
44 || buf_id == LOG_ID_KMSG
45 || buf_id == LOG_ID_SYSLOG)
/* Otherwise dispatch on the nominal backend. */
49 case BACKEND_PIPE : return &ops_pipe;
50 case BACKEND_ANDROID_LOGGER: return &ops_logger;
51 case BACKEND_ZERO_COPY : return &ops_zero_copy;
52 default : return NULL;
/* Create the initial array of fd_info descriptors, one per enabled buffer.
 * @param fdis            out: presumably receives the NULL-terminated
 *                        fd_info* array on success — the success return is
 *                        outside this view, TODO confirm
 * @param enabled_buffers bitmask of buffers to open
 * @param backend         must not be BACKEND_NONE/BACKEND_NULL (asserted)
 * @param is_compressed_memory forces pipe-daemon ops (see get_ops_by_buffer)
 * @param conf            configuration handed to each ops->create hook
 * @param aliased         per-buffer alias table filled by ops->create
 * @return TIZEN_ERROR_OUT_OF_MEMORY on allocation failure, or -EINVAL on a
 *         create failure (see the TODO below); other paths not visible.
 * NOTE(review): multiple lines (braces, error-path handling, the final
 *               success return) are missing from this view. */
56 int create_initial_fdis(struct fd_info ***fdis, int enabled_buffers, backend_t backend, bool is_compressed_memory, const struct log_config *conf, log_id_t aliased[LOG_ID_MAX])
60 assert(backend != BACKEND_NONE);
61 assert(backend != BACKEND_NULL);
/* Cleanup attribute frees the array on early return; ownership is
 * presumably transferred to *fdis on success — TODO confirm. */
63 __attribute__((cleanup(fdi_array_free))) struct fd_info **fdi_ptrs = NULL;
/* Precompute the ops vtable for every buffer id up front. */
67 const struct fd_ops *ops_by_buffer[LOG_ID_MAX];
68 for (size_t i = 0; i < NELEMS(ops_by_buffer); ++i)
69 ops_by_buffer[i] = get_ops_by_buffer(i, enabled_buffers, backend, is_compressed_memory);
71 fdi_ptrs = calloc(bit_count(enabled_buffers) + 1 /* NULL terminator */, sizeof *fdi_ptrs);
73 return TIZEN_ERROR_OUT_OF_MEMORY;
/* Tracks paths already claimed so aliased buffers can be detected. */
75 __attribute__ ((cleanup(list_clear_free_contents))) list_head used_paths = NULL;
78 for (int i = 0; i < LOG_ID_MAX; ++i) {
79 const struct fd_ops *const ops = ops_by_buffer[i];
83 struct fd_info *const fdi = fdi_create(ops, i);
87 r = fdi->ops->create(fdi, conf, i, &used_paths, &aliased[i]);
91 // TODO: Inform about that again somewhere else (except if r == -EALREADY and aliased[i] != INVALID)
95 fdi_ptrs[fdi_cnt++] = fdi;
99 return -EINVAL; // TODO: This has literally nothing to do with EINVAL
/* Scan all per-buffer fd_infos and return the one whose pending entry is
 * earliest according to `sort_by`. An entry without a usable timestamp
 * wins immediately (see the comment inline). The final return (presumably
 * `best_fdi`, possibly NULL when nothing is pending) is outside this view.
 * NOTE(review): "¤t_ts" on the get_timestamp call appears to be a
 * mis-encoded "&current_ts" — the "&curren" prefix was interpreted as the
 * HTML entity for '¤'. Restore it when repairing the file. */
107 struct fd_info *find_earliest_log(struct fd_info **data_fds, int fd_count, dlogutil_sorting_order_e sort_by)
111 struct fd_info *best_fdi = NULL;
112 struct timespec best_ts = {};
114 for (int i = 0; i < fd_count; ++i) {
115 struct fd_info *const fdi = data_fds[i];
/* Skip holes in the array and buffers with nothing pending. */
116 if (!fdi || !fdi->ops->has_log(fdi))
119 const dlogutil_entry_s *const le = fdi->ops->peek_entry(fdi);
120 struct timespec current_ts;
121 if (dlogutil_entry_get_timestamp(le, sort_by, ¤t_ts) != TIZEN_ERROR_NONE)
122 return fdi; // the timestampless log is always the best choice to flush ASAP
/* Keep the current best when this candidate is strictly later. */
124 if (best_fdi && ((current_ts.tv_sec > best_ts.tv_sec) || (current_ts.tv_sec == best_ts.tv_sec && current_ts.tv_nsec > best_ts.tv_nsec)))
127 best_ts = current_ts;
/* Repeatedly take the globally-earliest pending entry and push it into the
 * sorting vector (or out via `entry_out`), until a buffer runs dry.
 * Outputs: *all_buffers_empty — set when no fd had any pending log;
 *          *at_least_one_buffer_drained — set when the caller should go
 *          back to refilling (epoll/poll) before calling again.
 * NOTE(review): the do-loop header, the handling of fdi_push_log errors
 * and several braces are missing from this view. */
134 int put_logs_into_vector(struct fd_info **data_fds, int fd_count, struct sort_vector *logs, struct log_filter *filter, bool limited_dump, bool *at_least_one_buffer_drained, bool *all_buffers_empty, dlogutil_entry_s **entry_out)
142 struct fd_info *best_fdi;
/* Always pull from the fd with the earliest entry to keep output sorted. */
144 best_fdi = find_earliest_log(data_fds, fd_count, logs->sort_by);
/* No fd has anything pending: report empty and ask for a refill. */
146 *all_buffers_empty = true;
147 *at_least_one_buffer_drained = true;
148 return TIZEN_ERROR_NONE;
151 int r = fdi_push_log(best_fdi, logs, limited_dump, entry_out, filter);
/* An entry was produced; if its source just ran dry, flag the drain. */
156 if (!best_fdi->ops->has_log(best_fdi))
157 *at_least_one_buffer_drained = true;
158 return TIZEN_ERROR_NONE;
164 } while (best_fdi->ops->has_log(best_fdi)); // if a buffer got drained, break to give them all a chance to refill
166 *at_least_one_buffer_drained = true;
167 return TIZEN_ERROR_NONE;
/* Prepare data fd number `nfds` for printing: run its ops->prepare_print
 * hook, make the fd non-blocking, and register it either with the shared
 * epoll set or (for ops providing a poll_timeout hook) with the manual
 * polling counter. An fd the hook declines is freed and NULLed out —
 * that is success, not an error.
 * NOTE(review): the branch structure around the prepare_print result and
 * the epoll counter increment are partially missing from this view. */
170 static int state_prepare_single_print(dlogutil_state_s *state, int nfds, char *compress)
173 assert(state->mode != DLOGUTIL_MODE_NONPRINTING);
175 int r = state->data_fds[nfds]->ops->prepare_print(state->data_fds[nfds], state->dump_size, state->mode == DLOGUTIL_MODE_MONITOR, state->filter_object, compress);
177 return TIZEN_ERROR_IO_ERROR;
179 // everything went fine, but we're not printing this one
180 fdi_free(state->data_fds[nfds]);
181 state->data_fds[nfds] = NULL;
182 return TIZEN_ERROR_NONE;
/* Non-blocking so one quiet buffer cannot stall the whole read loop. */
185 int fd = state->data_fds[nfds]->fd;
186 r = fd_set_flags(fd, O_NONBLOCK);
188 return TIZEN_ERROR_IO_ERROR;
189 struct epoll_event ev = {
190 .data.ptr = state->data_fds[nfds],
193 state->enabled[nfds] = true;
/* FDs without a poll_timeout hook are serviced by the collective epoll;
 * the rest are counted so they can be polled by hand (see refill code). */
195 if (!state->data_fds[nfds]->ops->poll_timeout) {
196 epoll_ctl(state->epollfd, EPOLL_CTL_ADD, fd, &ev);
199 ++ state->non_epoll_cnt;
202 return TIZEN_ERROR_NONE;
/* Allocate the per-fd `enabled` flag array, then run
 * state_prepare_single_print() for every data fd. The handling of a
 * failing iteration (presumably propagating `r`) is outside this view. */
205 static int state_prepare_prints(dlogutil_state_s *state, char *compress)
208 assert(state->mode != DLOGUTIL_MODE_NONPRINTING);
210 state->enabled = calloc(state->fd_count, sizeof *state->enabled);
212 return TIZEN_ERROR_OUT_OF_MEMORY;
214 for (int nfds = 0; nfds < state->fd_count; ++nfds) {
215 int r = state_prepare_single_print(state, nfds, compress);
216 if (r != TIZEN_ERROR_NONE)
220 return TIZEN_ERROR_NONE;
/* Initialize a dlogutil_state_s for one retrieval session: take ownership
 * of the fd array, build the filter object, configure the sorting vector,
 * create the epoll set and — unless the mode is NONPRINTING — prepare
 * every fd for printing and allocate the epoll event array.
 * @return TIZEN_ERROR_NONE, or an OOM/IO error code on failure. Cleanup of
 *         partially-initialized members on the error returns is presumably
 *         the caller's job via state destruction — TODO confirm.
 * NOTE(review): several lines (braces, some error propagation) are missing
 * from this view. */
223 int dlogutil_state_init(dlogutil_state_s *state, struct fd_info ***data_fds_ptr, int fd_count, dlogutil_config_s *config, bool sorting_needed, dlogutil_sorting_order_e real_sort_by, const struct log_config *conf, log_id_t aliased[LOG_ID_MAX])
225 assert(data_fds_ptr);
226 struct fd_info **data_fds = *data_fds_ptr;
231 // TODO: optimize mode == DLOGUTIL_MODE_NONPRINTING case
233 state->epoll_cnt = 0;
234 state->non_epoll_cnt = 0;
235 state->enabled = NULL;
236 state->filter_object = NULL;
238 state->fd_count = fd_count;
239 state->data_fds = data_fds;
240 state->mode = config->mode;
241 state->need_epoll = true;
/* LONG_MAX means "no flush deadline set" (see flush_single_log). */
242 state->flush_target = LONG_MAX;
243 state->dump_size = (config->mode == DLOGUTIL_MODE_DUMP) ? config->dump_size : 0;
/* Ownership of the fd array now belongs to `state`. */
244 *data_fds_ptr = NULL;
246 for (int i = 0; i < LOG_ID_MAX; ++i)
247 state->aliased[i] = aliased[i];
249 sort_vector_init(&state->logs);
251 state->epollfd = epoll_create1(0);
252 if (state->epollfd < 0)
253 return TIZEN_ERROR_IO_ERROR;
255 state->filter_object = log_filter_from_filter(&config->filter);
256 if (!state->filter_object)
257 return TIZEN_ERROR_OUT_OF_MEMORY;
/* Apply the default "*:D" spec only when the user gave no filter of
 * their own (log_filter_need_apply_default decides that). */
258 if (log_filter_need_apply_default(state->filter_object) && log_filter_set_filterspec(state->filter_object, "*:D"))
259 return TIZEN_ERROR_OUT_OF_MEMORY;
261 sort_vector_apply_config(&state->logs, conf);
262 state->logs.size = sorting_needed ? config->logs_size : 0;
263 state->logs.sort_by = real_sort_by;
264 clock_gettime(get_proper_clock(real_sort_by), &state->logs.start);
/* Size-limited dump: keep one extra slot so overflow can be detected. */
266 if (config->mode == DLOGUTIL_MODE_DUMP && state->dump_size != DLOGUTIL_MAX_DUMP_SIZE)
267 state->logs.size = state->dump_size + 1;
269 if (!sort_vector_finalize(&state->logs))
270 return TIZEN_ERROR_OUT_OF_MEMORY;
272 if (state->mode != DLOGUTIL_MODE_NONPRINTING) {
273 int r = state_prepare_prints(state, config->compress);
274 if (r != TIZEN_ERROR_NONE)
/* Event array sized for the fds actually registered with epoll. */
278 if (state->epoll_cnt) {
279 state->evs = calloc(state->epoll_cnt, sizeof *state->evs);
281 return TIZEN_ERROR_OUT_OF_MEMORY;
284 return TIZEN_ERROR_NONE;
/* Pop at most one sufficiently old log from the sorting vector into *out.
 * The return value presumably reports whether an entry was produced; the
 * tail of the function is outside this view. */
287 static bool flush_single_log(dlogutil_state_s *state, dlogutil_entry_s **out)
/* LONG_MAX means no flush deadline was set; nothing to do then (or when
 * the vector is empty). */
291 if (state->flush_target >= LONG_MAX
292 || sort_vector_empty(&state->logs))
295 dlogutil_entry_s *const e = sort_vector_back(&state->logs);
296 struct timespec log_ts;
/* Entries younger than the target (millisecond comparison) stay queued. */
297 if (!dlogutil_entry_get_timestamp(e, state->logs.sort_by, &log_ts)
298 && log_ts.tv_sec * 1000 + log_ts.tv_nsec / 1000000 > state->flush_target)
301 *out = sort_vector_pop_ret(&state->logs);
/* Run one flush step and reset the flush deadline back to "none"
 * (LONG_MAX) — presumably once flushing concludes; the condition guarding
 * the reset is not visible in this view. */
305 static bool handle_flush(dlogutil_state_s *state, dlogutil_entry_s **out)
309 bool const r = flush_single_log(state, out);
311 state->flush_target = LONG_MAX;
/* Disable and deregister every enabled fd whose .eof method reports it is
 * exhausted, keeping the epoll/non-epoll bookkeeping counters accurate. */
315 static void remove_drained_buffers(dlogutil_state_s *state)
317 assert(state->enabled);
319 /* Instead of removing buffers when .read returns 0, we do it
320 * in a separate loop, using their .eof "method". This is because
321 * removing buffers that way would fail if EOF would be triggered
322 * somewhere else and the buffer would never be .read again. */
324 for (int i = 0; i < state->fd_count; ++i) {
325 if (!state->enabled[i])
328 const struct fd_info *const fdi = state->data_fds[i];
329 if (!fdi->ops->eof(fdi))
332 state->enabled[i] = false;
/* Mirror registration: epoll-managed fds leave the epoll set, manually
 * polled ones just decrement their counter. */
334 if (!fdi->ops->poll_timeout) {
335 epoll_ctl(state->epollfd, EPOLL_CTL_DEL, fdi->fd, NULL);
338 -- state->non_epoll_cnt;
/* Wait up to `timeout` ms on the shared epoll set and .read every fd that
 * became ready. -EAGAIN from a read is tolerated; the handling of other
 * negative read results and of epoll_wait failure is outside this view. */
343 static int refill_fdi_buffers_regular(dlogutil_state_s *state, int timeout)
/* Nothing registered with epoll: nothing to do here. */
345 if (!state->epoll_cnt)
348 int const nfds = epoll_wait(state->epollfd, state->evs, state->epoll_cnt, timeout);
352 for (int i = 0; i < nfds; ++i) {
/* The fd_info pointer was stashed in ev.data.ptr at registration time. */
353 struct fd_info *const fdi = state->evs[i].data.ptr;
354 int const rd = fdi->ops->read(fdi);
355 if (rd < 0 && rd != -EAGAIN)
/* Return the smallest poll_timeout among the enabled, non-EOF fds, using
 * the epoll convention where -1 means "wait forever". The accumulator
 * update and final return are outside this view. */
362 static int how_long_would_we_have_to_wait(dlogutil_state_s *state)
364 /* Pick the smallest timeout. We're using epoll timeout
365 * convention where -1 means infinite. This is mostly
366 * for future-proofing since the only client using this
367 * is the zero-copy backend which only has 1 FDI anyway. */
370 for (int i = 0; i < state->fd_count; ++i) {
371 if (!state->enabled[i])
374 const struct fd_info *const fdi = state->data_fds[i];
/* Exhausted fds will never produce more data; ignore their timeout. */
375 if (fdi->ops->eof(fdi))
378 const int timeout = fdi->ops->poll_timeout(fdi);
/* A finite timeout beats the current minimum (or an infinite one). */
379 if (timeout != -1 && (timeout < ret || ret == -1))
/* Refill fds that are NOT epoll-managed (those whose ops provide a
 * poll_timeout hook): sleep until the earliest of their self-reported
 * readiness times — but never longer than the caller's `timeout` — then
 * .read every enabled, non-EOF fd. -EAGAIN is tolerated.
 * NOTE(review): the early-return after the capped usleep and the error
 * handling of .read are partially missing from this view. */
385 static int refill_fdi_buffers_special(dlogutil_state_s *state, int timeout)
/* No manually-polled fds: nothing to do. */
387 if (!state->non_epoll_cnt)
390 /* Figure out how long we would have to wait. Sleep until then, or at least
391 * as long as we are allowed to (can't always pull off an all-nighter). */
392 int needed_timeout = how_long_would_we_have_to_wait(state);
393 if (needed_timeout > timeout && timeout != -1) {
394 usleep(timeout * USEC_PER_MSEC);
398 // FIXME: can the sleep be interrupted via signals etc?
399 // also FIXME: can `needed_timeout` be -1 at this point?
401 usleep(needed_timeout * USEC_PER_MSEC);
402 for (int i = 0; i < state->fd_count; ++i) {
403 if (!state->enabled[i])
406 struct fd_info *const fdi = state->data_fds[i];
407 if (fdi->ops->eof(fdi))
410 int const rd = fdi->ops->read(fdi);
411 if (rd < 0 && rd != -EAGAIN)
/* Refill all fd buffers, dispatching to the epoll-based path and the
 * manual-polling path. The two are mutually exclusive (asserted) so the
 * caller's `timeout` is never consumed twice. The combination/propagation
 * of the two results is outside this view. */
418 static int refill_fdi_buffers(dlogutil_state_s *state, int timeout)
422 /* Both of the two modes below perform a timeout-limited
423 * sleep; they can't both do it at the same time or we
424 * would exceed the timeout. Fortunately they don't live
425 * together, so we can assume only one of the two will run. */
426 assert(state->non_epoll_cnt == 0 || state->epoll_cnt == 0);
428 /* Regular file descriptors are handled via a collective epoll
429 * to minimize the number of performed syscalls. */
430 r = refill_fdi_buffers_regular(state, timeout);
434 /* Special needs file descriptors have their own
435 * means of figuring out if they're ready to refill. */
436 r = refill_fdi_buffers_special(state, timeout);
/* Decide the flush deadline (state->flush_target, in ms) for logs sitting
 * in the sorting vector, based on whether the buffers drained and how old
 * the oldest queued log is. Dump mode never sets a target. */
443 static void set_flush_target(dlogutil_state_s *state, bool all_buffers_empty, long last_log_age)
445 /* For the dumping modes, don't flush since that just removes the
446 * ability to sort flushed and incoming logs against each other. */
447 if (state->mode == DLOGUTIL_MODE_DUMP)
450 /* The oldest log can be so fresh as to be from the future
451 * (i.e. has a negative age). This can happen when somebody
452 * changes the realtime clock backwards. In that case let
453 * such logs be force-flushed immediately since the realtime
454 * timestamp has lost its meaning and the monotonic timestamp
455 * might not be present. Otherwise the log would wait until
456 * the clock reaches its old value again. */
457 if (last_log_age < 0) {
/* -1 is earlier than any real ms timestamp: everything flushes. */
458 state->flush_target = -1;
462 /* If all buffers got drained, we make the assumption that any logs
463 * yet to come would sort later than the ones we already have, so
464 * we can reasonably flush in that case.
466 * Otherwise, wait until the first burst of logs gets processed
467 * before flushing anything based on the timeout. Approximately all
468 * logs from the initial burst are old enough to be flushable right
469 * when they appear, but we want to wait a bit (through the second
470 * condition, which ends when the initial burst is processed fully)
471 * to make extra sure they're sorted correctly against each other. */
472 if (!all_buffers_empty && (!state->logs.old_logs_dumped || last_log_age < state->logs.timeout))
/* Target = "now" (on the sort clock) minus the sorting window, in ms. */
475 struct timespec reference_ts;
476 clock_gettime(get_proper_clock(state->logs.sort_by), &reference_ts);
477 state->flush_target = reference_ts.tv_sec * 1000 + reference_ts.tv_nsec / 1000000 - state->logs.timeout;
/* Clamp the caller's wait `timeout` (ms, -1 = infinite) so we wake no
 * later than when the oldest queued log becomes old enough to flush.
 * The empty-vector early return value is outside this view. */
480 static int get_timeout(dlogutil_state_s *state, int timeout, long last_log_age)
482 if (sort_vector_empty(&state->logs))
/* Time left until the oldest log exceeds the sorting window. */
485 int max_timeout = state->logs.timeout - last_log_age;
489 return timeout == -1 || timeout > max_timeout ? max_timeout : timeout;
/**
 * @brief Handle input
 * @details The main loop reading log data: refills the per-fd buffers,
 *          sorts entries through the sort vector and hands out at most
 *          one entry per call via @p out.
 * @param[in] state The retrieval state (fds, sort vector, epoll set)
 * @param[in] timeout Maximum time to wait, in ms; -1 means no limit
 * @param[out] out Receives the next log entry, if one is produced
 * @return TIZEN_ERROR_NONE on success, TIZEN_ERROR_TIMED_OUT when the
 *         deadline passes, TIZEN_ERROR_IO_ERROR when refilling fails,
 *         TIZEN_ERROR_NO_DATA when nothing is left to return.
 * NOTE(review): the previous version of this comment documented a stale
 *               signature (data_fds/fd_count/logs/l_file); rewritten to
 *               match the visible parameters. Several code lines of the
 *               function body are missing from this view.
 */
501 int do_print_once(dlogutil_state_s *state, int timeout, dlogutil_entry_s **out)
/* tv_nsec == -1 marks "no timestamp taken yet" for sort_vector_time_span. */
505 struct timespec now = { .tv_nsec = -1 };
508 clock_gettime(CLOCK_MONOTONIC, &now);
/* Absolute deadline in ms on the monotonic clock. */
509 end_at = now.tv_sec * 1000 + now.tv_nsec / 1000000 + timeout;
512 const bool is_limited_dumping = state->mode == DLOGUTIL_MODE_DUMP
513 && state->dump_size != DLOGUTIL_MAX_DUMP_SIZE;
/* Main loop: keep going while any fd is still live. */
515 while (state->epoll_cnt > 0 || state->non_epoll_cnt > 0) {
516 /* Note: sort_vector_time_span assumes that either now.tv_nsec == -1,
517 * or now is the current CLOCK_MONOTONIC timestamp. If you change the clock,
518 * make sure to change that function! */
519 const long last_log_age = sort_vector_time_span(&state->logs, now);
520 timeout = get_timeout(state, timeout, last_log_age);
522 if (state->need_epoll) {
523 int r = refill_fdi_buffers(state, timeout);
525 return TIZEN_ERROR_IO_ERROR;
526 state->need_epoll = false;
/* A flushable entry ends this call immediately. */
529 if (handle_flush(state, out))
530 return TIZEN_ERROR_NONE;
532 bool all_buffers_empty = false;
533 int r = put_logs_into_vector(state->data_fds, state->fd_count, &state->logs, state->filter_object, is_limited_dumping, &state->need_epoll, &all_buffers_empty, out);
535 /* Putting logs into the vector can cause extra reads,
536 * which means a buffer might have gotten drained here
537 * and not at the regular refill stage. */
538 remove_drained_buffers(state);
543 set_flush_target(state, all_buffers_empty, last_log_age);
546 return TIZEN_ERROR_NONE;
/* Deadline check: re-read the clock and bail out once end_at passes. */
547 else if (end_at != -1) {
548 clock_gettime(CLOCK_MONOTONIC, &now);
549 if (end_at <= now.tv_sec * 1000 + now.tv_nsec / 1000000)
550 return TIZEN_ERROR_TIMED_OUT;
552 assert(now.tv_nsec == -1); // Could be now.tv_nsec = 1, but this is slightly faster
/* All fds are gone: drain whatever is left in the sort vector. */
556 bool all_buffers_empty;
558 all_buffers_empty = false;
559 r = put_logs_into_vector(state->data_fds, state->fd_count, &state->logs, state->filter_object, is_limited_dumping, &state->need_epoll, &all_buffers_empty, out);
560 } while (r == 0 && !all_buffers_empty && !*out);
564 return TIZEN_ERROR_NONE;
/* Limited dump: trim the vector down to the requested size first. */
566 if (is_limited_dumping)
567 while (sort_vector_used_size(&state->logs) > state->dump_size)
568 sort_vector_pop(&state->logs);
570 if (!sort_vector_empty(&state->logs)) {
571 *out = sort_vector_pop_ret(&state->logs);
572 return TIZEN_ERROR_NONE;
575 return TIZEN_ERROR_NO_DATA;