#define ZLOGGER_BLOCK_SIZE (4 * ZLOGGER_KB)
#define ZLOGGER_BLOCK_MAP_COUNT (ZLOGGER_MAP_SIZE / ZLOGGER_BLOCK_SIZE)
#define ZLOGGER_BLOCK_COUNT (ZLOGGER_BUFFER_SIZE / ZLOGGER_BLOCK_SIZE)
+#define ZLOGGER_BITMAP_SIZE (ZLOGGER_BLOCK_COUNT / 8)
#define ZLOGGER_DATA_MAX (ZLOGGER_BLOCK_SIZE - sizeof(struct zlogger_header))
#define ZLOGGER_TAG_MAX (32)
#define ZLOGGER_IOCTL_COMMAND_CLEAR (20745322)
#define ZLOGGER_IOCTL_COMMAND_SET_DEFAULT_PRIORITY _IOW('a', 'a', uint32_t)
#define ZLOGGER_IOCTL_COMMAND_SET_DEFAULT_TAG _IOW('a', 'b', char* )
+#define ZLOGGER_IOCTL_COMMAND_GET_BITMAP _IOR('a', 'c', void *)
struct zlogger_entry {
__u64 time;
#define MAX_BUF_LEN 255
+#define ZLOGGER_BITMAP_ARRAY_SIZE \
+ (ZLOGGER_BITMAP_SIZE / sizeof(unsigned long))
+
#if ZLOGGER_BLOCK_SIZE != PAGE_SIZE
#error Only ZLOGGER_BLOCK_SIZE == PAGE_SIZE is supported
#endif
uint64_t last_ts;
};
+/* Per-reader state for the dump device.  One instance is allocated per
+ * open() and linked into g_bitmaps (under g_block_mutex).
+ * pending_blocks accumulates blocks retired since this reader's last
+ * GET_BITMAP query (set by mark_block_done_in_bitmaps(), cleared by
+ * zlogger_get_bitmap() after a successful copy to userspace).
+ */
+struct zlog_dump_file {
+	struct list_head list;
+	unsigned long pending_blocks[ZLOGGER_BITMAP_ARRAY_SIZE];
+};
+
/* --zlogger file channel */
static struct miscdevice zlogger_device;
static uint32_t g_err_count;
+/* All open dump readers (struct zlog_dump_file); protected by g_block_mutex. */
+static LIST_HEAD(g_bitmaps);
+/* Blocks currently mapped by writers; bit index is block number - 1. */
+static unsigned long g_active_blocks[ZLOGGER_BITMAP_ARRAY_SIZE];
+
static int g_zlog_enable = 1;
module_param_named(zlog_enable, g_zlog_enable, int, 0644);
static int zlogger_unmap(struct thread_table_field *ptr);
+/* Mark @blk as currently mapped by a writer so readers know it may still
+ * receive new log entries.  Blocks are numbered from 1, so bit (blk - 1)
+ * is used.  Caller must hold g_block_mutex.
+ */
+static void mark_block_active_in_bitmaps(uint16_t blk)
+{
+	uint16_t index = blk - 1;
+
+	/* A bare mutex_is_locked() call discarded its result and checked
+	 * nothing; lockdep_assert_held() actually enforces the contract.
+	 */
+	lockdep_assert_held(&g_block_mutex);
+	set_bit(index, g_active_blocks);
+}
+
+/* Mark @blk as retired: no longer mapped by a writer, but it may hold
+ * unread logs, so flag it as pending in every open reader's bitmap.
+ * Blocks are numbered from 1, so bit (blk - 1) is used.  Caller must hold
+ * g_block_mutex (it protects g_bitmaps and the bitmaps themselves).
+ */
+static void mark_block_done_in_bitmaps(uint16_t blk)
+{
+	struct zlog_dump_file *p;
+	uint16_t index = blk - 1;
+
+	/* A bare mutex_is_locked() call discarded its result and checked
+	 * nothing; lockdep_assert_held() actually enforces the contract.
+	 */
+	lockdep_assert_held(&g_block_mutex);
+
+	clear_bit(index, g_active_blocks);
+	list_for_each_entry(p, &g_bitmaps, list)
+		set_bit(index, p->pending_blocks);
+}
+
static struct thread_table_field *get_thread_table(pid_t tid, bool is_stdout)
{
struct thread_table_field *ptr = NULL;
ptr = get_thread_table(tid, is_stdout);
if (ptr) {
+ if (ptr->blk)
+ mark_block_done_in_bitmaps(ptr->blk);
+ mark_block_active_in_bitmaps(blk);
ptr->blk = blk;
return ptr;
}
ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
+ mark_block_active_in_bitmaps(blk);
ptr->tid = tid;
ptr->is_stdout = is_stdout;
ptr->blk = blk;
{
mutex_is_locked(&g_block_mutex);
+ if (ptr->blk)
+ mark_block_done_in_bitmaps(ptr->blk);
hash_del(&ptr->next);
kfree(ptr);
}
ptr->blk = 0;
zlogger_unmap(ptr);
+ mark_block_done_in_bitmaps(blk);
queue_push(&g_free_q, blk);
}
ptr->blk = 0;
zlogger_unmap(ptr);
+ mark_block_done_in_bitmaps(blk);
queue_push(&g_free_q, blk);
}
memset(&block->head, 0, sizeof(block->head));
}
+ /* Regarding bitmaps, it might be tempting to handle this operation in a
+ * special way, since immediately after it completes we know atomically
+ * that there are no logs. However, the performance gain is quite dubious, as the
+ * clear operation is almost never used in production. All the performance
+ * gains would only matter in the first bitmap read anyway, since we only
+ * have to look at the unmapped blocks once. Meanwhile, implementing this
+ * optimization would make it quite difficult to test due to extra races.
+ */
+
mutex_unlock(&g_block_mutex);
return 0;
.owner = THIS_MODULE,
};
+/* open() handler for the dump device: allocate per-reader state and
+ * register it on g_bitmaps so retired blocks start accumulating in its
+ * pending bitmap.  Returns 0 or a negative errno.
+ */
+static int zlogger_dump_open(struct inode *inode, struct file *file)
+{
+	struct zlog_dump_file *df;
+	int err;
+
+	/* kzalloc keeps the pending bitmap all-zero for a fresh reader. */
+	df = kzalloc(sizeof(*df), GFP_KERNEL);
+	if (!df)
+		return -ENOMEM;
+
+	err = nonseekable_open(inode, file);
+	if (err) {
+		kfree(df);
+		return err;
+	}
+
+	INIT_LIST_HEAD(&df->list);
+	mutex_lock(&g_block_mutex);
+	list_add_tail(&df->list, &g_bitmaps);
+	mutex_unlock(&g_block_mutex);
+
+	file->private_data = df;
+	return 0;
+}
+
+/* release() handler: unregister this reader's bitmap from g_bitmaps and
+ * free its per-open state.
+ */
+static int zlogger_dump_release(struct inode *ignored, struct file *file)
+{
+	struct zlog_dump_file *df = file->private_data;
+
+	file->private_data = NULL;
+
+	mutex_lock(&g_block_mutex);
+	list_del(&df->list);
+	mutex_unlock(&g_block_mutex);
+
+	kfree(df);
+	return 0;
+}
+
static int zlogger_dump_mmap(struct file *filep, struct vm_area_struct *vma)
{
const int PAGES_PER_MAP = ZLOGGER_MAP_SIZE / PAGE_SIZE;
return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size, vma->vm_page_prot);
}
+/* GET_BITMAP handler: copy to userspace the set of blocks that may hold
+ * unread logs (blocks currently active ORed with blocks retired since this
+ * reader's previous query), then reset the per-reader pending set.  The
+ * pending bits are consumed only after a successful copy, so a faulting
+ * reader does not lose track of blocks.  @arg is a user pointer to a
+ * ZLOGGER_BITMAP_SIZE-byte buffer.  Returns 0 or -EFAULT.
+ */
+static long zlogger_get_bitmap(struct file *filp, unsigned long arg)
+{
+	struct zlog_dump_file *zlog_dump_file_data =
+		(struct zlog_dump_file *)filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long ret = 0;
+	unsigned long final[ZLOGGER_BITMAP_ARRAY_SIZE];
+
+	mutex_lock(&g_block_mutex);
+
+	bitmap_or(final, g_active_blocks, zlog_dump_file_data->pending_blocks,
+		  ZLOGGER_BLOCK_COUNT);
+
+	/* Pass the array itself (decays to unsigned long *) rather than its
+	 * address; copy_to_user() returns the number of bytes NOT copied.
+	 */
+	if (copy_to_user(argp, final, ZLOGGER_BITMAP_SIZE)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	bitmap_zero(zlog_dump_file_data->pending_blocks, ZLOGGER_BLOCK_COUNT);
+
+out:
+	mutex_unlock(&g_block_mutex);
+	return ret;
+}
+
static long zlogger_dump_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case ZLOGGER_IOCTL_COMMAND_CLEAR:
		return zlogger_clear();
+	/* arg is a user pointer to a ZLOGGER_BITMAP_SIZE-byte buffer. */
+	case ZLOGGER_IOCTL_COMMAND_GET_BITMAP:
+		return zlogger_get_bitmap(filp, arg);
+
	default:
		return -EINVAL;
	}
}
+#ifdef CONFIG_COMPAT
+/* 32-bit userspace passes a compat pointer for GET_BITMAP; translate it
+ * with compat_ptr() before delegating.  CLEAR takes no argument and any
+ * other command is rejected downstream, so those are forwarded as-is.
+ */
+static long zlogger_dump_compat_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	unsigned long native_arg = arg;
+
+	if (cmd == ZLOGGER_IOCTL_COMMAND_GET_BITMAP)
+		native_arg = (unsigned long)compat_ptr(arg);
+
+	return zlogger_dump_ioctl(filp, cmd, native_arg);
+}
+#endif
+
static const struct file_operations zlogger_dump_fops = {
-	.open = nonseekable_open,
+	/* open/release manage the per-reader pending-block bitmap. */
+	.open = zlogger_dump_open,
+	.release = zlogger_dump_release,
	.mmap = zlogger_dump_mmap,
	.unlocked_ioctl = zlogger_dump_ioctl,
#ifdef CONFIG_COMPAT
-	/* arg is unused so far, so we can call ioctl directly */
-	.compat_ioctl = zlogger_dump_ioctl,
+	/* GET_BITMAP takes a user pointer, so compat must apply compat_ptr(). */
+	.compat_ioctl = zlogger_dump_compat_ioctl,
#endif
	.owner = THIS_MODULE,
};
make -C tests/kdbus
make -C tests/logger
make -C tests/zlog_stdout
+make -C tests/zlog_bitmap
%install
mkdir -p %{buildroot}/usr/src/%{name}/kdbus
cp tests/logger/logger-test %{buildroot}/%{_libexecdir}/%{name}/tests/logger-test
cp tests/kdbus/kdbus-test %{buildroot}/%{_libexecdir}/%{name}/tests/kdbus-test
cp tests/zlog_stdout/zlog_stdout %{buildroot}/%{_libexecdir}/%{name}/tests/zlog_stdout
+cp tests/zlog_bitmap/zlog_bitmap %{buildroot}/%{_libexecdir}/%{name}/tests/zlog_bitmap
%files
%manifest %{name}.manifest
%{_libexecdir}/%{name}/tests/logger-test
%{_libexecdir}/%{name}/tests/kdbus-test
%{_libexecdir}/%{name}/tests/zlog_stdout
+%{_libexecdir}/%{name}/tests/zlog_bitmap
--- /dev/null
+CC := $(CROSS_COMPILE)gcc
+CFLAGS += -I../../include/uapi
+CFLAGS += -std=gnu11
+CFLAGS += -D_GNU_SOURCE
+CFLAGS += -Wno-error=unused-result
+LDFLAGS = -pthread
+
+.PHONY: all clean
+
+TEST_CUSTOM_PROGS := zlog_bitmap
+all: $(TEST_CUSTOM_PROGS)
+
+OBJS = \
+	zlog_bitmap.o
+
+$(TEST_CUSTOM_PROGS): $(OBJS)
+	$(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(LDFLAGS)
+
+# Compile only the matched .c ($<), not every prerequisite ($^), so the
+# recipe stays correct if header/order-only prerequisites are ever added.
+$(OBJS): %.o: %.c
+	$(CC) -c $< -o $@ $(CFLAGS)
+
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(OBJS)
+
+clean:
+	$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
--- /dev/null
+/*
+ * tests/zlog_bitmap/zlog_bitmap.c
+ *
+ * Zero-copy logging for Tizen
+ *
+ * Copyright (C) 2022 Samsung Electronics Co., Ltd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/zlogger.h>
+
+/* Testing this functionality is not as trivial as one might think.
+ * In reality, the bitmaps might be used by repeatedly doing two things in a loop:
+ * 1. asking for a bitmap,
+ * 2. reading the blocks that are marked in a bitmap.
+ * Of course we cannot do those steps atomically; we can't even do 2. itself
+ * atomically. This means a race condition is possible and we might miss some
+ * logs. But that is not an issue, since the missed logs will either be cleared
+ * or read in the next iteration.
+ *
+ * In this test, we want to check if no logs are missed. So we instead read all
+ * blocks and compare with an old snapshot; the test is passed iff there are no
+ * logs in the blocks that wouldn't be read normally. However, the same
+ * race condition is still possible, and would mean test flakiness. We resolve
+ * this by asking for another bitmap after reading the blocks and ORing it in.
+ *
+ * To sum up, in the test, we do the following in a loop:
+ * 1. copy the entire memory,
+ * 2. wait a bit for more logs to appear,
+ * 3. take a bitmap,
+ * 4. read all the blocks, checking which blocks have changed,
+ * 5. take another bitmap,
+ * 6. we compare the result of step 4. with the OR of both bitmaps.
+ *
+ * Finally, let's explain the specifics of how we check which blocks have been
+ * modified in step 4. We consider a block modified if at least a single bit
+ * has changed, but only if the new offset is not 0. If the offset is 0, then
+ * the block has no logs inside, and by definition we are not missing any logs.
+ */
+
+/* Looks for "false negatives": blocks whose contents changed (and which
+ * hold at least one log, i.e. head.offset != 0) but were flagged in
+ * neither of the two bitmaps taken around the scan.  Exit status: 0 on
+ * success, 255 (-1) on usage/setup/IO failure, -127 on a false negative.
+ */
+int main(int argc, char *argv[])
+{
+	int fd, err, ret = 0, i, j, result[2][2];
+	long it, iteration_number, sleep_time;
+	char *bitmap_pre, *bitmap_post, *mem_snapshot;
+	bool *modified;
+	volatile char *mmap_addr;
+
+	if (argc != 3) {
+		fprintf(stderr, "Usage: %s ITERATIONS SECONDS\n", argv[0]);
+		ret = -1; /* bad usage must not exit 0 like a passing test */
+		goto exit;
+	}
+
+	errno = 0;
+	iteration_number = strtol(argv[1], NULL, 10);
+	sleep_time = strtol(argv[2], NULL, 10);
+	if (iteration_number <= 0 || sleep_time <= 0 || errno != 0) {
+		fprintf(stderr, "Inputs must be positive integers\n");
+		ret = -1;
+		goto exit;
+	}
+
+	fd = open("/dev/zlogger_dump", O_RDONLY);
+	if (fd < 0) {
+		fprintf(stderr, "Error opening zlog device\n");
+		ret = -1;
+		goto exit;
+	}
+
+	mem_snapshot = malloc(ZLOGGER_BUFFER_SIZE);
+	if (!mem_snapshot) {
+		fprintf(stderr, "Error mallocing\n");
+		ret = -1;
+		goto close_fd;
+	}
+
+	bitmap_pre = malloc(ZLOGGER_BITMAP_SIZE);
+	if (!bitmap_pre) {
+		fprintf(stderr, "Error mallocing\n");
+		ret = -1;
+		goto free_mem_snapshot;
+	}
+
+	modified = calloc(ZLOGGER_BLOCK_COUNT, sizeof(*modified));
+	if (!modified) {
+		fprintf(stderr, "Error mallocing\n");
+		ret = -1;
+		goto free_bitmap_pre;
+	}
+
+	bitmap_post = malloc(ZLOGGER_BITMAP_SIZE);
+	if (!bitmap_post) {
+		fprintf(stderr, "Error mallocing\n");
+		ret = -1;
+		goto free_modified;
+	}
+
+	for (it = 1; it <= iteration_number; ++it) {
+		/* Step 1: snapshot the entire buffer. */
+		for (i = 0; i < ZLOGGER_DEVICE_COUNT; ++i) {
+			mmap_addr = mmap(NULL, ZLOGGER_MAP_SIZE, PROT_READ, MAP_SHARED,
+					fd, i * ZLOGGER_MAP_SIZE);
+			if (mmap_addr == MAP_FAILED) {
+				fprintf(stderr, "Error mmaping\n");
+				ret = -1; /* was missing: would exit 0 on failure */
+				goto free_bitmap_post;
+			}
+			for (j = 0; j < ZLOGGER_MAP_SIZE; ++j)
+				mem_snapshot[i * ZLOGGER_MAP_SIZE + j] = mmap_addr[j];
+			munmap((void *)mmap_addr, ZLOGGER_MAP_SIZE);
+		}
+
+		/* Step 2: let more logs accumulate. */
+		sleep(sleep_time);
+
+		/* Step 3: first bitmap. */
+		err = ioctl(fd, ZLOGGER_IOCTL_COMMAND_GET_BITMAP, bitmap_pre);
+		if (err < 0) {
+			fprintf(stderr, "Error ioctling\n");
+			ret = -1;
+			goto free_bitmap_post;
+		}
+
+		/* Step 4: scan every block for changes against the snapshot. */
+		for (i = 0; i < ZLOGGER_DEVICE_COUNT; ++i) {
+			mmap_addr = mmap(NULL, ZLOGGER_MAP_SIZE, PROT_READ, MAP_SHARED,
+					fd, i * ZLOGGER_MAP_SIZE);
+			if (mmap_addr == MAP_FAILED) {
+				fprintf(stderr, "Error mmaping\n");
+				ret = -1;
+				goto free_bitmap_post;
+			}
+			for (j = 0; j < ZLOGGER_BLOCK_MAP_COUNT; ++j) {
+				bool changed = true;
+				volatile struct zlogger_block *b =
+					(volatile void *)mmap_addr +
+					ZLOGGER_BLOCK_SIZE * j;
+
+				changed &= b->head.offset != 0;
+				/* memcmp() returns an arbitrary nonzero value;
+				 * bitwise-ANDing the raw result (e.g. 2) into a
+				 * 0/1 bool could falsely clear `changed`, so
+				 * normalize it with != 0 first.
+				 */
+				changed &= memcmp(mem_snapshot + i * ZLOGGER_MAP_SIZE +
+							  j * ZLOGGER_BLOCK_SIZE,
+						  (void *)b, ZLOGGER_BLOCK_SIZE) != 0;
+				modified[i * ZLOGGER_BLOCK_MAP_COUNT + j] = changed;
+			}
+			munmap((void *)mmap_addr, ZLOGGER_MAP_SIZE);
+		}
+
+		/* Step 5: second bitmap, closing the race around the scan. */
+		err = ioctl(fd, ZLOGGER_IOCTL_COMMAND_GET_BITMAP, bitmap_post);
+		if (err < 0) {
+			fprintf(stderr, "Error ioctling\n");
+			ret = -1;
+			goto free_bitmap_post;
+		}
+
+		/* Step 6: tally the confusion matrix. */
+		memset(result, 0, sizeof(result));
+
+		for (i = 0; i < ZLOGGER_BLOCK_COUNT; ++i) {
+			bool enabled = bitmap_pre[i / 8] & (1 << (i % 8)) ||
+				       bitmap_post[i / 8] & (1 << (i % 8));
+			bool changed = modified[i];
+
+			result[enabled][changed] += 1;
+		}
+
+		printf("Iteration %ld:\n", it);
+		printf("True positive (blocks with new logs that were correctly read): %d\n",
+		       result[true][true]);
+		printf("False positive (blocks without new logs that could not have been skipped; unoptimal, but acceptable): %d\n",
+		       result[true][false]);
+		printf("True negative (blocks without new logs that were correctly skipped): %d\n",
+		       result[false][false]);
+		printf("False negative (blocks with new logs that were incorrectly skipped; bug if nonzero): %d\n",
+		       result[false][true]);
+		/* Sticky failure: previously `ret` was reused for ioctl()
+		 * results, so a later successful iteration masked an earlier
+		 * false negative.
+		 */
+		if (result[false][true] != 0)
+			ret = -127;
+	}
+
+free_bitmap_post:
+	free(bitmap_post);
+free_modified:
+	free(modified);
+free_bitmap_pre:
+	free(bitmap_pre);
+free_mem_snapshot:
+	free(mem_snapshot);
+close_fd:
+	close(fd);
+exit:
+	return ret;
+}