3 * Copyright (c) 2019-2020 Samsung Electronics Co., Ltd
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is furnished
10 * to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 #include <dlogutil-internal.h>
25 #include <queued_entry_timestamp.h>
28 #include "fdi_logger.h"
29 #include "fdi_zero_copy.h"
30 #include "logretrieve.h"
/* Picks the fd_ops vtable used to service one log buffer, based on the
 * configured backend. Presumably returns NULL for disabled buffers and
 * unknown backends (the early-return bodies are not visible in this view).
 * NOTE(review): the embedded original line numbers skip values here (37,
 * 39-40, 46-48 missing), so braces, the disabled-buffer return, the
 * pipe-override return and the `switch (backend)` header are not shown —
 * confirm against the full file. */
36 static const struct fd_ops *get_ops_by_buffer(size_t buf_id, int enabled_buffers, backend_t backend, bool is_compressed_memory)
/* Buffer not in the enabled set -> no ops for it (return not visible). */
38 if (!bit_test(enabled_buffers, buf_id))
41 /* These are implemented by the pipe daemon,
42 * regardless of the nominal backend. */
/* Compressed memory, kmsg and syslog always go through the pipe backend. */
43 if (is_compressed_memory
44 || buf_id == LOG_ID_KMSG
45 || buf_id == LOG_ID_SYSLOG)
/* Dispatch on the nominal backend (switch header missing from this view). */
49 case BACKEND_PIPE : return &ops_pipe;
50 case BACKEND_ANDROID_LOGGER: return &ops_logger;
51 case BACKEND_ZERO_COPY : return &ops_zero_copy;
52 default : return NULL;
/* Counts how many fd_info pointer slots create_initial_fdis() needs to
 * allocate: one per buffer with a non-NULL ops entry (loop body not visible
 * in this view — presumably `if (ops_by_buffer[i]) ++ret;`), plus one extra
 * slot for the NULL terminator of the pointer array. */
56 static size_t get_required_fdi_capacity(const struct fd_ops *ops_by_buffer[static LOG_ID_MAX])
58 size_t ret = 1; // NULL terminator
59 for (size_t i = 0; i < LOG_ID_MAX; ++i)
/* Builds the initial NULL-terminated array of fd_info objects, one per
 * enabled log buffer, and hands ownership out via *fdis (presumably — the
 * final assignment is not visible in this view). Returns a TIZEN_ERROR_*
 * code on allocation failure, or a negative errno-style value on
 * per-buffer create failure.
 * NOTE(review): the embedded line numbers have gaps (66-68, 71, 75-76, 78,
 * 80, 82, 85-87, 89-91, 93-95, 97-99, 101-103, 105+), so error checks,
 * `continue`s and the success path are not all shown. */
65 int create_initial_fdis(struct fd_info ***fdis, int enabled_buffers, backend_t backend, bool is_compressed_memory, const struct log_config *conf, log_id_t aliased[LOG_ID_MAX])
/* The null backends never reach this point. */
69 assert(backend != BACKEND_NONE);
70 assert(backend != BACKEND_NULL);
/* Resolve the ops vtable for every buffer up front. */
72 const struct fd_ops *ops_by_buffer[LOG_ID_MAX];
73 for (size_t i = 0; i < NELEMS(ops_by_buffer); ++i)
74 ops_by_buffer[i] = get_ops_by_buffer(i, enabled_buffers, backend, is_compressed_memory);
/* cleanup attribute frees the array (and its contents) on early return. */
77 __attribute__((cleanup(fdi_array_free))) struct fd_info **fdi_ptrs = calloc(get_required_fdi_capacity(ops_by_buffer), sizeof *fdi_ptrs);
79 return TIZEN_ERROR_OUT_OF_MEMORY;
/* Tracks paths already opened so aliased buffers can share an entry. */
81 __attribute__ ((cleanup(list_clear_free_contents))) list_head used_paths = NULL;
83 for (int i = 0; i < LOG_ID_MAX; ++i) {
84 const struct fd_ops *const ops = ops_by_buffer[i];
/* Wrap the ops in a per-buffer fd_info; backend-specific open happens
 * in ops->create, which also reports aliasing via aliased[i]. */
88 struct fd_info *const fdi = fdi_create(ops, i);
92 int r = fdi->ops->create(fdi, conf, i, &used_paths, &aliased[i]);
96 // TODO: Inform about that again somewhere else (except if r == -EALREADY and aliased[i] != INVALID)
100 fdi_ptrs[fdi_cnt++] = fdi;
104 return -EINVAL; // TODO: This has literally nothing to do with EINVAL
/* Scans the readable fd_infos and returns the one whose pending entry has
 * the earliest timestamp under the given sort order — i.e. the log that
 * should be flushed next. An entry without a usable timestamp wins
 * immediately (see the comment on line 127). Presumably returns best_fdi
 * (possibly NULL when nothing has a log) at the end, which is not visible
 * in this view. */
112 struct fd_info *find_earliest_log(struct fd_info **data_fds, int fd_count, dlogutil_sorting_order_e sort_by)
116 struct fd_info *best_fdi = NULL;
117 struct timespec best_ts = {};
119 for (int i = 0; i < fd_count; ++i) {
120 struct fd_info *const fdi = data_fds[i];
/* Skip empty slots and buffers with nothing buffered right now. */
121 if (!fdi || !fdi->ops->has_log(fdi))
124 const dlogutil_entry_s *const le = fdi->ops->peek_entry(fdi);
125 struct timespec current_ts;
/* NOTE(review): "¤t_ts" below is mojibake — almost certainly the
 * original read "&current_ts" ("&curr" was eaten as the HTML entity
 * "&curren;"). Must be restored before this file can compile. */
126 if (dlogutil_entry_get_timestamp(le, sort_by, ¤t_ts) != TIZEN_ERROR_NONE)
127 return fdi; // the timestampless log is always the best choice to flush ASAP
/* Keep the current best if it is not strictly older than this entry
 * (the skip body is not visible; best_fdi is presumably updated below). */
129 if (best_fdi && ((current_ts.tv_sec > best_ts.tv_sec) || (current_ts.tv_sec == best_ts.tv_sec && current_ts.tv_nsec > best_ts.tv_nsec)))
132 best_ts = current_ts;
/* Repeatedly moves the earliest pending log from the fd_infos into the
 * sort vector (via fdi_push_log) until either a buffer drains, everything
 * is empty, or an entry is emitted through *entry_out. The two out-flags
 * tell the caller whether a refill pass is needed (at_least_one_buffer_drained)
 * and whether there is nothing left at all (all_buffers_empty).
 * NOTE(review): the embedded line numbers have gaps (do-loop opener,
 * fdi_push_log error branch, inner break), so not all control flow is
 * visible here. */
139 int put_logs_into_vector(struct fd_info **data_fds, int fd_count, struct sort_vector *logs, struct log_filter *filter, bool limited_dump, bool *at_least_one_buffer_drained, bool *all_buffers_empty, dlogutil_entry_s **entry_out)
147 struct fd_info *best_fdi;
149 best_fdi = find_earliest_log(data_fds, fd_count, logs->sort_by);
/* No candidate at all: everything is drained and empty. */
151 *all_buffers_empty = true;
152 *at_least_one_buffer_drained = true;
153 return TIZEN_ERROR_NONE;
/* Push the winning entry through the filter into the sort vector. */
156 int r = fdi_push_log(best_fdi, logs, limited_dump, entry_out, filter);
/* If that push consumed the buffer's last entry, request a refill pass. */
161 if (!best_fdi->ops->has_log(best_fdi))
162 *at_least_one_buffer_drained = true;
163 return TIZEN_ERROR_NONE;
169 } while (best_fdi->ops->has_log(best_fdi)); // if a buffer got drained, break to give them all a chance to refill
171 *at_least_one_buffer_drained = true;
172 return TIZEN_ERROR_NONE;
/* Prepares one fd_info (index nfds) for printing: runs the backend's
 * prepare_print hook, makes the fd non-blocking, and registers it either
 * with the shared epoll instance or (for backends that poll via timeouts)
 * with the non-epoll counter. A prepare_print result meaning "skip this
 * buffer" frees the fd_info and NULLs its slot.
 * NOTE(review): embedded line numbers have gaps (176-177, 179, 181, 183,
 * 188-189, 192, 196-197, 199, 202-203, 205-206), so some branch bodies and
 * the epoll_cnt increment are not visible here. */
175 static int state_prepare_single_print(dlogutil_state_s *state, int nfds, char *compress)
178 assert(state->mode != DLOGUTIL_MODE_NONPRINTING);
180 int r = state->data_fds[nfds]->ops->prepare_print(state->data_fds[nfds], state->dump_size, state->mode == DLOGUTIL_MODE_MONITOR, state->filter_object, compress);
182 return TIZEN_ERROR_IO_ERROR;
184 // everything went fine, but we're not printing this one
185 fdi_free(state->data_fds[nfds]);
186 state->data_fds[nfds] = NULL;
187 return TIZEN_ERROR_NONE;
/* Non-blocking mode so reads in the main loop never stall. */
190 int fd = state->data_fds[nfds]->fd;
191 r = fd_set_flags(fd, O_NONBLOCK);
193 return TIZEN_ERROR_IO_ERROR;
194 struct epoll_event ev = {
195 .data.ptr = state->data_fds[nfds],
198 state->enabled[nfds] = true;
/* FDs without a poll_timeout method are epoll-driven; the rest are
 * counted separately and serviced by refill_fdi_buffers_special(). */
200 if (!state->data_fds[nfds]->ops->poll_timeout) {
201 epoll_ctl(state->epollfd, EPOLL_CTL_ADD, fd, &ev);
204 ++ state->non_epoll_cnt;
207 return TIZEN_ERROR_NONE;
/* Allocates the per-fd `enabled` flag array and runs
 * state_prepare_single_print() for every fd_info, bailing out on the
 * first failure (the error-return body is not visible in this view). */
210 static int state_prepare_prints(dlogutil_state_s *state, char *compress)
213 assert(state->mode != DLOGUTIL_MODE_NONPRINTING);
215 state->enabled = calloc(state->fd_count, sizeof *state->enabled);
217 return TIZEN_ERROR_OUT_OF_MEMORY;
219 for (int nfds = 0; nfds < state->fd_count; ++nfds) {
220 int r = state_prepare_single_print(state, nfds, compress);
221 if (r != TIZEN_ERROR_NONE)
225 return TIZEN_ERROR_NONE;
/* Initializes a dlogutil_state_s for a retrieval session: takes ownership
 * of the data_fds array (caller's pointer is NULLed on line 249), sets up
 * the filter, the sort vector, the epoll instance, and — unless the mode
 * is NONPRINTING — the per-fd print preparation and epoll event buffer.
 * Returns TIZEN_ERROR_NONE on success, or an OUT_OF_MEMORY / IO_ERROR
 * code on failure (cleanup on the error paths is presumably the caller's
 * job via a matching destroy function — not visible here). */
228 int dlogutil_state_init(dlogutil_state_s *state, struct fd_info ***data_fds_ptr, int fd_count, dlogutil_config_s *config, bool sorting_needed, dlogutil_sorting_order_e real_sort_by, const struct log_config *conf, log_id_t aliased[LOG_ID_MAX])
230 assert(data_fds_ptr);
231 struct fd_info **data_fds = *data_fds_ptr;
236 // TODO: optimize mode == DLOGUTIL_MODE_NONPRINTING case
/* Zero all the counters/pointers first so partial init fails cleanly. */
238 state->epoll_cnt = 0;
239 state->non_epoll_cnt = 0;
240 state->enabled = NULL;
241 state->filter_object = NULL;
243 state->fd_count = fd_count;
244 state->data_fds = data_fds;
245 state->mode = config->mode;
246 state->need_epoll = true;
/* LONG_MAX means "no flush scheduled" (see flush_single_log/handle_flush). */
247 state->flush_target = LONG_MAX;
248 state->dump_size = (config->mode == DLOGUTIL_MODE_DUMP) ? config->dump_size : 0;
/* Ownership of the fd array transfers to state; clear the caller's copy. */
249 *data_fds_ptr = NULL;
251 for (int i = 0; i < LOG_ID_MAX; ++i)
252 state->aliased[i] = aliased[i];
254 sort_vector_init(&state->logs);
256 state->epollfd = epoll_create1(0);
257 if (state->epollfd < 0)
258 return TIZEN_ERROR_IO_ERROR;
260 state->filter_object = log_filter_from_filter(&config->filter);
261 if (!state->filter_object)
262 return TIZEN_ERROR_OUT_OF_MEMORY;
/* "*:D" = default filterspec applied when the user gave none. */
263 if (log_filter_need_apply_default(state->filter_object) && log_filter_set_filterspec(state->filter_object, "*:D"))
264 return TIZEN_ERROR_OUT_OF_MEMORY;
266 sort_vector_apply_config(&state->logs, conf);
/* Sorting disabled -> size 0, i.e. logs pass straight through. */
267 state->logs.size = sorting_needed ? config->logs_size : 0;
268 state->logs.sort_by = real_sort_by;
269 clock_gettime(get_proper_clock(real_sort_by), &state->logs.start);
/* Limited dump: keep one extra slot so overflow detection still works. */
271 if (config->mode == DLOGUTIL_MODE_DUMP && state->dump_size != DLOGUTIL_MAX_DUMP_SIZE)
272 state->logs.size = state->dump_size + 1;
274 if (!sort_vector_finalize(&state->logs))
275 return TIZEN_ERROR_OUT_OF_MEMORY;
277 if (state->mode != DLOGUTIL_MODE_NONPRINTING) {
278 int r = state_prepare_prints(state, config->compress);
279 if (r != TIZEN_ERROR_NONE)
/* Event buffer for epoll_wait(); only needed if any fd is epoll-driven. */
283 if (state->epoll_cnt) {
284 state->evs = calloc(state->epoll_cnt, sizeof *state->evs);
286 return TIZEN_ERROR_OUT_OF_MEMORY;
289 return TIZEN_ERROR_NONE;
/* Pops the oldest entry out of the sort vector into *out if a flush is
 * scheduled (flush_target < LONG_MAX) and the entry is old enough (its
 * ms-timestamp does not exceed the target). Presumably returns whether an
 * entry was emitted (the return statements are not visible in this view). */
292 static bool flush_single_log(dlogutil_state_s *state, dlogutil_entry_s **out)
/* LONG_MAX = no flush scheduled; empty vector = nothing to flush. */
296 if (state->flush_target >= LONG_MAX
297 || sort_vector_empty(&state->logs))
300 dlogutil_entry_s *const e = sort_vector_back(&state->logs);
301 struct timespec log_ts;
/* Too fresh (newer than the target, in milliseconds) -> keep it sorted. */
302 if (!dlogutil_entry_get_timestamp(e, state->logs.sort_by, &log_ts)
303 && log_ts.tv_sec * 1000 + log_ts.tv_nsec / 1000000 > state->flush_target)
306 *out = sort_vector_pop_ret(&state->logs);
/* Wrapper around flush_single_log(): attempts one flush and, presumably
 * when nothing was flushed (the branch line is not visible here),
 * de-schedules further flushing by resetting flush_target to LONG_MAX.
 * Returns whether an entry was emitted into *out. */
310 static bool handle_flush(dlogutil_state_s *state, dlogutil_entry_s **out)
314 bool const r = flush_single_log(state, out);
316 state->flush_target = LONG_MAX;
/* Disables every fd_info whose backend reports EOF: marks it disabled and
 * unregisters it from epoll (or decrements the non-epoll counter for
 * timeout-polled backends). See the in-code comment for why this is done
 * here rather than at .read time. */
320 static void remove_drained_buffers(dlogutil_state_s *state)
322 assert(state->enabled);
324 /* Instead of removing buffers when .read returns 0, we do it
325 * in a separate loop, using their .eof "method". This is because
326 * removing buffers that way would fail if EOF would be triggered
327 * somewhere else and the buffer would never be .read again. */
329 for (int i = 0; i < state->fd_count; ++i) {
330 if (!state->enabled[i])
333 const struct fd_info *const fdi = state->data_fds[i];
334 if (!fdi->ops->eof(fdi))
337 state->enabled[i] = false;
/* Mirror of the registration split in state_prepare_single_print():
 * epoll-driven fds leave the epoll set, the rest decrement the counter
 * (the epoll_cnt decrement is presumably in a line not visible here). */
339 if (!fdi->ops->poll_timeout) {
340 epoll_ctl(state->epollfd, EPOLL_CTL_DEL, fdi->fd, NULL);
343 -- state->non_epoll_cnt;
/* Refills epoll-driven fd_infos: waits up to `timeout` ms on the shared
 * epoll instance and calls .read on every fd that became ready. -EAGAIN
 * from .read is tolerated (non-blocking fds); other negative values are
 * presumably propagated as errors (the return lines are not visible). */
348 static int refill_fdi_buffers_regular(dlogutil_state_s *state, int timeout)
/* Nothing registered with epoll -> nothing to do. */
350 if (!state->epoll_cnt)
353 int const nfds = epoll_wait(state->epollfd, state->evs, state->epoll_cnt, timeout);
357 for (int i = 0; i < nfds; ++i) {
/* .data.ptr was set to the owning fd_info at registration time. */
358 struct fd_info *const fdi = state->evs[i].data.ptr;
359 int const rd = fdi->ops->read(fdi);
360 if (rd < 0 && rd != -EAGAIN)
/* Computes the smallest poll_timeout (in ms, epoll convention: -1 means
 * infinite) across all enabled, non-EOF fd_infos. Used by
 * refill_fdi_buffers_special() to know how long to sleep. */
367 static int how_long_would_we_have_to_wait(dlogutil_state_s *state)
369 /* Pick the smallest timeout. We're using epoll timeout
370 * convention where -1 means infinite. This is mostly
371 * for future-proofing since the only client using this
372 * is the zero-copy backend which only has 1 FDI anyway. */
/* `ret` is presumably initialised to -1 on a line not visible here. */
375 for (int i = 0; i < state->fd_count; ++i) {
376 if (!state->enabled[i])
379 const struct fd_info *const fdi = state->data_fds[i];
380 if (fdi->ops->eof(fdi))
/* Take a finite timeout whenever it beats the current best (or the
 * best is still "infinite"). */
383 const int timeout = fdi->ops->poll_timeout(fdi);
384 if (timeout != -1 && (timeout < ret || ret == -1))
/* Refills timeout-polled ("special") fd_infos, i.e. those with a
 * poll_timeout method instead of an epoll-able fd. Sleeps until the
 * earliest backend-reported deadline (bounded by the caller's timeout),
 * then .reads every enabled non-EOF fd. -EAGAIN is tolerated; other
 * negative .read results are presumably treated as errors (not visible). */
390 static int refill_fdi_buffers_special(dlogutil_state_s *state, int timeout)
392 if (!state->non_epoll_cnt)
395 /* Figure out how long we would have to wait. Sleep until then, or at least
396 * as long as we are allowed to (can't always pull off an all-nighter). */
397 int needed_timeout = how_long_would_we_have_to_wait(state);
/* Caller's budget expires before any fd is ready: sleep the whole budget
 * and return (the early return is presumably on a line not visible here). */
398 if (needed_timeout > timeout && timeout != -1) {
399 usleep(timeout * USEC_PER_MSEC);
403 // FIXME: can the sleep be interrupted via signals etc?
404 // also FIXME: can `needed_timeout` be -1 at this point?
406 usleep(needed_timeout * USEC_PER_MSEC);
407 for (int i = 0; i < state->fd_count; ++i) {
408 if (!state->enabled[i])
411 struct fd_info *const fdi = state->data_fds[i];
412 if (fdi->ops->eof(fdi))
415 int const rd = fdi->ops->read(fdi);
416 if (rd < 0 && rd != -EAGAIN)
/* Refills all fd_info buffers, dispatching to the epoll-based path and the
 * timeout-based path. The assert documents why the shared `timeout` budget
 * cannot be double-spent: the two fd populations never coexist. Error
 * propagation between the two calls is on lines not visible in this view. */
423 static int refill_fdi_buffers(dlogutil_state_s *state, int timeout)
427 /* Both of the two modes below perform a timeout-limited
428 * sleep; they can't both do it at the same time or we
429 * would exceed the timeout. Fortunately they don't live
430 * together, so we can assume only one of the two will run. */
431 assert(state->non_epoll_cnt == 0 || state->epoll_cnt == 0);
433 /* Regular file descriptors are handled via a collective epoll
434 * to minimize the number of performed syscalls. */
435 r = refill_fdi_buffers_regular(state, timeout);
439 /* Special needs file descriptors have their own
440 * means of figuring out if they're ready to refill. */
441 r = refill_fdi_buffers_special(state, timeout);
/* Decides when sorted logs may be flushed out of the sort vector, storing
 * the deadline (in ms, same units as flush_single_log compares against)
 * into state->flush_target. DUMP mode never flushes; negative log ages
 * (clock set backwards) force an immediate flush; otherwise the target is
 * "now minus the sort window" so only sufficiently-settled logs leave. */
448 static void set_flush_target(dlogutil_state_s *state, bool all_buffers_empty, long last_log_age)
450 /* For the dumping modes, don't flush since that just removes the
451 * ability to sort flushed and incoming logs against each other. */
452 if (state->mode == DLOGUTIL_MODE_DUMP)
455 /* The oldest log can be so fresh as to be from the future
456 * (i.e. has a negative age). This can happen when somebody
457 * changes the realtime clock backwards. In that case let
458 * such logs be force-flushed immediately since the realtime
459 * timestamp has lost its meaning and the monotonic timestamp
460 * might not be present. Otherwise the log would wait until
461 * the clock reaches its old value again. */
462 if (last_log_age < 0) {
463 state->flush_target = -1;
467 /* If all buffers got drained, we make the assumption that any logs
468 * yet to come would sort later than the ones we already have, so
469 * we can reasonably flush in that case.
471 * Otherwise, wait until the first burst of logs gets processed
472 * before flushing anything based on the timeout. Approximately all
473 * logs from the initial burst are old enough to be flushable right
474 * when they appear, but we want to wait a bit (through the second
475 * condition, which ends when the initial burst is processed fully)
476 * to make extra sure they're sorted correctly against each other. */
477 if (!all_buffers_empty && (!state->logs.old_logs_dumped || last_log_age < state->logs.timeout))
/* Deadline = current time (in ms, on the sort clock) minus the window. */
480 struct timespec reference_ts;
481 clock_gettime(get_proper_clock(state->logs.sort_by), &reference_ts);
482 state->flush_target = reference_ts.tv_sec * 1000 + reference_ts.tv_nsec / 1000000 - state->logs.timeout;
/* Clamps the caller's timeout (epoll convention, -1 = infinite) so we wake
 * up no later than when the oldest buffered log becomes flushable
 * (sort window minus its current age). The empty-vector early return body
 * is not visible in this view — presumably it returns `timeout` unchanged. */
485 static int get_timeout(dlogutil_state_s *state, int timeout, long last_log_age)
487 if (sort_vector_empty(&state->logs))
490 int max_timeout = state->logs.timeout - last_log_age;
494 return timeout == -1 || timeout > max_timeout ? max_timeout : timeout;
498 * @brief Handle input
499 * @details The main loop reading log data: refills buffers, sorts entries
500 *          and emits at most one log entry per call
501 * @param[in] state The retrieval state (FDs, sort vector, filter, epoll data)
502 * @param[in] timeout Maximum time to wait in milliseconds, -1 for no limit
503 * @param[out] out Receives the emitted log entry, if any
504 * @return int TIZEN_ERROR_NONE if successful, TIZEN_ERROR_TIMED_OUT when the
505 *         timeout expires, TIZEN_ERROR_NO_DATA when everything is drained
/* Emits at most one log entry into *out. While any buffer is still live,
 * alternates between refilling fd buffers (bounded by `timeout`), flushing
 * a settled entry from the sort vector, and pushing new logs into the
 * vector; once all buffers are gone, drains the vector. Returns
 * TIZEN_ERROR_NONE when an entry is produced (or on some intermediate
 * paths not fully visible here), TIZEN_ERROR_TIMED_OUT when the deadline
 * passes, TIZEN_ERROR_NO_DATA when nothing remains.
 * NOTE(review): many embedded line numbers are missing in this view
 * (507-509, 511-512, 515-516, 519, 526, 529, 532-533, 536, 539, 544-547,
 * 549-550, 556, 558-560, 562, 566-568, 570, 574, 578-579), so several
 * condition headers and error branches are not shown. */
506 int do_print_once(dlogutil_state_s *state, int timeout, dlogutil_entry_s **out)
/* tv_nsec == -1 marks "no timestamp taken yet"; sort_vector_time_span
 * relies on this sentinel (see the note on lines 521-523). */
510 struct timespec now = { .tv_nsec = -1 };
/* With a finite timeout, precompute the absolute deadline in ms. */
513 clock_gettime(CLOCK_MONOTONIC, &now);
514 end_at = now.tv_sec * 1000 + now.tv_nsec / 1000000 + timeout;
517 const bool is_limited_dumping = state->mode == DLOGUTIL_MODE_DUMP
518 && state->dump_size != DLOGUTIL_MAX_DUMP_SIZE;
/* Main loop: runs while any buffer (epoll-driven or timeout-polled)
 * is still enabled. */
520 while (state->epoll_cnt > 0 || state->non_epoll_cnt > 0) {
521 /* Note: sort_vector_time_span assumes that either now.tv_nsec == -1,
522 * or now is the current CLOCK_MONOTONIC timestamp. If you change the clock,
523 * make sure to change that function! */
524 const long last_log_age = sort_vector_time_span(&state->logs, now);
525 timeout = get_timeout(state, timeout, last_log_age);
/* need_epoll is set when a buffer was drained and needs refilling. */
527 if (state->need_epoll) {
528 int r = refill_fdi_buffers(state, timeout);
530 return TIZEN_ERROR_IO_ERROR;
531 state->need_epoll = false;
/* A settled entry left the sort vector -> emit it immediately. */
534 if (handle_flush(state, out))
535 return TIZEN_ERROR_NONE;
537 bool all_buffers_empty = false;
538 int r = put_logs_into_vector(state->data_fds, state->fd_count, &state->logs, state->filter_object, is_limited_dumping, &state->need_epoll, &all_buffers_empty, out);
540 /* Putting logs into the vector can cause extra reads,
541 * which means a buffer might have gotten drained here
542 * and not at the regular refill stage. */
543 remove_drained_buffers(state);
548 set_flush_target(state, all_buffers_empty, last_log_age);
/* The surrounding condition is not visible; presumably this returns
 * when *out was produced above. */
551 return TIZEN_ERROR_NONE;
/* Finite deadline: check it against a fresh monotonic timestamp. */
552 else if (end_at != -1) {
553 clock_gettime(CLOCK_MONOTONIC, &now);
554 if (end_at <= now.tv_sec * 1000 + now.tv_nsec / 1000000)
555 return TIZEN_ERROR_TIMED_OUT;
557 assert(now.tv_nsec == -1); // Could be now.tv_nsec = 1, but this is slightly faster
/* All buffers are gone: drain whatever is still in the sort vector. */
561 bool all_buffers_empty;
563 all_buffers_empty = false;
564 r = put_logs_into_vector(state->data_fds, state->fd_count, &state->logs, state->filter_object, is_limited_dumping, &state->need_epoll, &all_buffers_empty, out);
565 } while (r == 0 && !all_buffers_empty && !*out);
569 return TIZEN_ERROR_NONE;
/* Limited dump: trim the vector down to the requested number of logs. */
571 if (is_limited_dumping)
572 while (sort_vector_used_size(&state->logs) > state->dump_size)
573 sort_vector_pop(&state->logs);
575 if (!sort_vector_empty(&state->logs)) {
576 *out = sort_vector_pop_ret(&state->logs);
577 return TIZEN_ERROR_NONE;
580 return TIZEN_ERROR_NO_DATA;