-DLOGGER_SYSTEM_BUFFER_SIZE=$(LOGGER_SYSTEM_BUFFER_SIZE)
CFLAGS_proc-tsm.o += -Wno-error=missing-attributes
+
# Build is disabled by default so that when new module is added to this repository (and -source package),
# it won't get automatically build in packages using it (that would break these automatically as file list
# would no longer match).
BUILD_logger ?= n
BUILD_proc_tsm ?= n
BUILD_kdbus ?= n
+# vlog follows the same opt-in convention as the modules above (enable with BUILD_vlog=y).
+BUILD_vlog ?= n
obj-$(BUILD_logger) += logger.o
obj-$(BUILD_proc_tsm) += proc-tsm.o
obj-$(BUILD_kdbus) += kdbus/
+# vlog sources live in the vlogger/ subdirectory (see its own Kbuild file).
+obj-$(BUILD_vlog) += vlogger/
all:
-	make -C $(KERNELDIR) M=$(PWD) CFLAGS_MODULE=-I$(PWD)/include modules
+	make -C $(KERNELDIR) M=$(PWD) CFLAGS_MODULE=-I$(PWD)/include modules
modules_install:
	make -C $(KERNELDIR) M=$(PWD) INSTALL_MOD_STRIP=$(INSTALL_MOD_STRIP) INSTALL_MOD_PATH=$(PWD)/../$(INSTALL_MOD_PATH) modules_install
--- /dev/null
+# Kbuild for the vlog module: a single vlog.ko linked from the objects below.
+ccflags-y += -Wno-error=missing-attributes -I$(src)/include_internal
+
+vlog-y := logger_main.o logger_policy.o logger_filter.o logger_stats.o logger_ext.o vlogger.o
+obj-m += vlog.o
--- /dev/null
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * struct user_logger_entry_compat - defines a single entry that is given to a logger
+ * @len: The length of the payload
+ * @__pad: Two bytes of padding that appear to be required
+ * @pid: The generating process' process ID
+ * @tid: The generating process' thread ID
+ * @sec: The number of seconds that have elapsed since the Epoch
+ * @nsec: The number of nanoseconds that have elapsed since @sec
+ * @msg: The message that is to be logged
+ *
+ * The userspace structure for version 1 of the logger_entry ABI.
+ * This structure is returned to userspace unless the caller requests
+ * an upgrade to a newer ABI version.
+ */
+struct user_logger_entry_compat {
+	__u16 len;
+	__u16 __pad;
+	__s32 pid;
+	__s32 tid;
+	__s32 sec;
+	__s32 nsec;
+	char msg[0];
+};
+
+/**
+ * struct logger_entry - defines a single entry that is given to a logger
+ * @len: The length of the payload
+ * @pid: The generating process' process ID
+ * @tid: The generating process' thread ID
+ * @sec: The number of seconds that have elapsed since the Epoch
+ * @nsec: The number of nanoseconds that have elapsed since @sec
+ * @euid: Effective UID of logger
+ * @hash: The hashkey of tagname
+ * @logger_id: The target log buffer id for reading
+ * @msg: The message that is to be logged
+ *
+ * The structure for version 2 of the logger_entry ABI.
+ * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
+ * is called with version >= 2
+ * WARNING: In case of version >= 2, ext_buf can break compatibility with 32 bit user on 64 bit kernel
+ */
+struct logger_entry {
+	__u16 len;
+	__s32 pid;
+	__s32 tid;
+	__s32 sec;
+	__s32 nsec;
+	kuid_t euid;	/* NOTE(review): kernel-internal type in a userspace-visible struct — confirm its size is ABI-stable across configs */
+	__u32 hash;
+	__s32 logger_id;
+	char msg[0];
+};
+
+/**
+ * struct logger_tag
+ * @len: Length of a NULL-terminated tag including '\0'
+ * @ptr: Pointer to a user buffer containing the tag
+ */
+struct logger_tag {
+	uint64_t len;
+	uint64_t ptr;
+};
+
+#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
+#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
+#define LOGGER_LOG_SYSTEM	"log_system"	/* system/framework messages */
+#define LOGGER_LOG_MAIN		"log_main"	/* everything else */
+#define LOGGER_SMACK_LABEL	"*"		/* default smack label in log nodes */
+
+/* Ring-buffer size. NOTE: the previous definition carried a trailing
+ * semicolon, which made any expression use of this macro a syntax error. */
+#define LOGGER_BUFFER_SIZE (1 << 21) /* 2 MB */
+
+#define LOGGER_ENTRY_MAX_LEN		(1 * 1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD	(LOGGER_ENTRY_MAX_LEN - \
+					sizeof(struct user_logger_entry_compat))
+
+#define __LOGGERIO	0xAE
+
+/* ioctl command numbers; 7 and 13 are unused here (presumably retired). */
+#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
+#define LOGGER_GET_VERSION		_IO(__LOGGERIO, 5) /* abi version */
+#define LOGGER_SET_VERSION		_IO(__LOGGERIO, 6) /* abi version */
+#define LOGGER_SET_GLOBAL_PRIORITY	_IO(__LOGGERIO, 8) /* Set global priority */
+#define LOGGER_SET_TAG_FILTER_INFO	_IO(__LOGGERIO, 9) /* Set tag filter information */
+#define LOGGER_GET_BACKEND_FILTER_STATE	_IO(__LOGGERIO, 10) /* Get state of backend filter */
+#define LOGGER_SET_PID			_IO(__LOGGERIO, 11) /* Set target PID for log to read */
+#define LOGGER_SET_DEV_ID		_IO(__LOGGERIO, 12) /* Set devices id */
+#define LOGGER_SET_EXTENDED_BUFFER	_IO(__LOGGERIO, 14) /* Set reader to use extended buffer */
+#define LOGGER_SET_TAG			_IO(__LOGGERIO, 15) /* set flow mode tag */
+#define LOGGER_SET_PRIO			_IO(__LOGGERIO, 16) /* set flow mode prio */
+
+#endif /* _LINUX_LOGGER_H */
--- /dev/null
+/*
+ * logger_ext: Extended buffer for Logger
+ *
+ * Copyright (C) 2019 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LOGGER_EXT_H__
+#define __LOGGER_EXT_H__
+
+#include <linux/types.h>	/* size_t — <stdbool.h> alone does not declare it */
+#include <stdbool.h>
+
+/* All entry points below expect the caller to hold the logger's log.mutex
+ * (see the function comments in logger_ext.c). */
+int logger_ext_get_buffer(size_t size);
+char *logger_ext_get_address(int offset);
+bool logger_ext_enable(void);
+bool logger_ext_disable(void);
+bool logger_ext_invalidate(size_t invalid, size_t invalid_max);
+#endif /* __LOGGER_EXT_H__ */
--- /dev/null
+/*
+ * logger_filter: Filter data structures for logger
+ *
+ * Copyright (C) 2018 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LOGGER_FILTER_H__
+#define __LOGGER_FILTER_H__
+
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/compat.h>
+
+/* Upper bounds on the number of per-reader tag filters and on the length
+ * of a single tag name (excluding the terminating '\0'). */
+#define DLOG_MAX_TAGS 32
+#define DLOG_MAX_TAGNAME 32
+
+/**
+ * @brief log priority values, in ascending priority order.
+ */
+typedef enum {
+	DLOG_UNKNOWN = 0, /**< Keep this always at the start */
+	DLOG_DEFAULT, /**< Default */
+	DLOG_VERBOSE, /**< Verbose */
+	DLOG_DEBUG, /**< Debug */
+	DLOG_INFO, /**< Info */
+	DLOG_WARN, /**< Warning */
+	DLOG_ERROR, /**< Error */
+	DLOG_FATAL, /**< Fatal */
+	DLOG_SILENT, /**< Silent */
+	DLOG_PRIO_MAX /**< Keep this always at the end. */
+} log_priority;
+
+/**
+ * struct filter_info_t - Filter information for dlog tags and priorities.
+ * @tagname: Identifiable dlog tag name.
+ * @priority: indicates the estimated severity of the event that caused the log.
+ * @hentry: Linked entry list.
+ */
+typedef struct filter_info_t {
+	char *tagname;
+	log_priority priority;
+	struct hlist_node hentry;
+} filter_info;
+
+/**
+ * struct logger_tagname_registry - names registered for a filter.
+ * @entries_hash: Map of entries (128 buckets: DECLARE_HASHTABLE order 7).
+ * @name_seq_last: Last used sequence number to assign to a name entry.
+ */
+struct logger_tagname_registry {
+	DECLARE_HASHTABLE(entries_hash, 7);
+	u32 name_seq_last;
+};
+
+/**
+ * struct filter_args_t - the representation of ioctl argument used by user and kernel
+ * @tagname: Identifiable dlog tag name.
+ * @priority: indicates the estimated severity of the event that caused the log.
+ */
+#ifdef CONFIG_COMPAT
+/* 32-bit userspace layout of the same argument: the tag pointer arrives
+ * as a compat_uptr_t and must be widened with compat_ptr(). */
+struct filter_args_compat_t {
+	compat_uptr_t tagname;
+	log_priority priority;
+};
+#endif
+struct filter_args_t {
+	char *tagname;
+	log_priority priority;
+};
+
+unsigned int logger_strhash(const char *str);
+struct logger_tagname_registry *logger_tagname_registry_new(void);
+void logger_tagname_registry_free(struct logger_tagname_registry *r);
+
+#endif /* __LOGGER_FILTER_H__ */
--- /dev/null
+/*
+ * logger_policy: A Logging Policy for Tizen TV.
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LOGGER_POLICY_H__
+#define __LOGGER_POLICY_H__
+
+#define LOGGER_POLICY_MISCDEV_NAME "logger_policy"
+#define LOGGER_POLICY_MAX_TAG_NAME 32
+#define LOGGER_POLICY_MAX_TAG_COUNT 10
+#define LOGGER_POLICY_SMACK_LABEL "*"
+#define LOGGER_POLICY_MAX_TASK_NAME 16
+
+/* One id per log buffer (matches the LOGGER_LOG_* device names in logger.h). */
+enum logger_id {
+	LOGGER_ID_MAIN = 0,
+	LOGGER_ID_RADIO,
+	LOGGER_ID_SYSTEM,
+	LOGGER_ID_EVENTS,
+	NR_LOGGER_IDS,
+};
+
+/**
+ * struct policy_qos - structure to represent 'qos'
+ * @count : the number of logs used by the process that owns this policy
+ * @reset_timer : Next time when QoS data will be reset.
+ * @num_tags : the number of tags
+ * @tag_list : the list for tags
+ * @lock : the mutex to protect the @tag_list
+ * @activated : Mark whether logger QoS is activated.
+ * @qos_bypass : Mark whether logger QoS is bypass or not.
+ */
+struct policy_qos {
+	int count;
+	int reset_timer;
+	int num_tags;
+	struct list_head tag_list;
+	struct mutex lock;
+	bool activated;
+	bool qos_bypass;
+};
+
+/**
+ * struct policy_tag - structure to represent TAG
+ * @name: the name of tag (NUL-terminated; +1 for the terminator)
+ * @node: the node for @tag_list in policy_qos structure
+ * @count: the number of logs used by this tag.
+ * @limited: the flag indicating whether this tag is limited or not
+ */
+struct policy_tag {
+	char name[LOGGER_POLICY_MAX_TAG_NAME + 1];
+	struct list_head node;
+	int count;
+	int limited;
+};
+
+/**
+ * struct policy_filter - structure to represent filter
+ * @name: the name of filter
+ * @node: the node for filter list
+ */
+struct policy_filter {
+	char name[LOGGER_POLICY_MAX_TAG_NAME + 1];
+	struct list_head node;
+};
+
+/**
+ * struct policy_qos_bypass - structure to represent filter
+ * @name: the name of VIP Process (task comm, hence MAX_TASK_NAME bytes)
+ * @node: the node for VIP Process list
+ */
+struct policy_qos_bypass {
+	char name[LOGGER_POLICY_MAX_TASK_NAME];
+	struct list_head node;
+};
+
+/**
+ * struct policy_qos_controller - structure to represent qos controller
+ * @pid: the pid of qos controller
+ * @node: the node for qos controller list
+ */
+struct policy_qos_controller {
+	int pid;
+	struct list_head node;
+};
+
+/**
+ * struct logger_policy - structure to represent logger policy
+ * @qos: the qos structure
+ * @pid: pid of current task
+ * @ref: reference count
+ * @node: the node for logger policy
+ */
+struct logger_policy {
+	struct policy_qos qos;
+	int pid;
+	int ref;
+	struct hlist_node node;
+};
+
+
+#define __LOGGERPOLICYIOCTL 0x97
+#define LOGGER_POLICY_SET_FILTER	_IOW(__LOGGERPOLICYIOCTL, 0x1, char[LOGGER_POLICY_MAX_TAG_NAME])
+#define LOGGER_POLICY_SET_STATUS	_IOW(__LOGGERPOLICYIOCTL, 0x2, int)
+#define LOGGER_POLICY_SET_LOGGER_ENABLE	_IOW(__LOGGERPOLICYIOCTL, 0x3, int)
+#define LOGGER_POLICY_SET_QOS		_IOW(__LOGGERPOLICYIOCTL, 0x4, int)
+
+/* logger_policy_check() returns whether a (priority, tag) message passes the
+ * policy; get/put manage the per-process policy reference count. */
+int logger_policy_check(struct logger_policy *policy, uint8_t priority, const char *tag);
+int get_backend_logfilter_enable(void);
+struct logger_policy *logger_policy_get(void);
+void logger_policy_put(struct logger_policy *policy);
+int logger_policy_init(void);
+void logger_policy_exit(void);
+#endif /* __LOGGER_POLICY_H__ */
+
--- /dev/null
+/*
+ * logger_stats: Logger Stats for Tizen TV.
+ *
+ * Copyright (C) 2019 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LOGGER_STATS_H__
+#define __LOGGER_STATS_H__
+
+#define LOGGER_STATS_MISCDEV_NAME "logger_stats"
+#define LOGGER_STATS_SMACK_LABEL "*"
+#define LOGGER_STATS_SIZE 256
+
+#define __LOGGERSTATSIOCTL 0x98
+#define LOGGER_STATS_SEND	_IOW(__LOGGERSTATSIOCTL, 0x2, char[LOGGER_STATS_SIZE])
+
+/* Counter categories tracked per log buffer (see logger_stats_update callers
+ * in logger.c: READ on delivery, OVERFLOW when a lapped reader drops data). */
+enum logger_stats_id {
+	LOGGER_STATS_WRITE = 0,
+	LOGGER_STATS_READ,
+	LOGGER_STATS_OVERFLOW,
+	LOGGER_STATS_DROP,
+	LOGGER_STATS_MAX,
+};
+
+void logger_stats_update(int stats_id, int log_id, int bytes, int pid);
+int logger_stats_init(void);
+void logger_stats_exit(void);
+#endif /* __LOGGER_STATS_H__ */
+
--- /dev/null
+/*
+ * A Logging Subsystem for VD
+ *
+ * Copyright (c) 2015-2021 Samsung Electronics Co., Ltd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOGGER_H__
+#define __VLOGGER_H__
+
+/* Module-lifetime hooks; NOTE(review): exit returning int is unusual for a
+ * teardown hook — confirm callers actually consume the return value. */
+int vlogger_init(void);
+int vlogger_exit(void);
+#endif /* __VLOGGER_H__ */
--- /dev/null
+/*
+ * logger_ext: Extended buffer for Logger
+ *
+ * Copyright (C) 2019 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "logger_ext: " fmt
+
+#include <linux/vmalloc.h>
+#include "logger_ext.h"
+
+/* NUMA node used for vmalloc_node(); falls back to node 0 when the
+ * platform does not define a dedicated extended-memory node. */
+#ifndef NUMA_EMEM_VNODE
+#define NUMA_EMEM_VNODE 0
+#endif
+
+#define LOGGER_EXT_BUFFER_SIZE (1 << 24) // 16 MB
+#define LOGGER_EXT_BUFFER_COUNT (2)
+
+/* Lifecycle of one extended buffer: DISABLE -> ENABLE -> ENABLE_EXHAUSTED
+ * (full) -> DISABLE again once its contents have been fully invalidated. */
+typedef enum {
+	EXT_STATE_DISABLE,
+	EXT_STATE_ENABLE,
+	EXT_STATE_ENABLE_EXHAUSTED
+} ext_state_t;
+
+struct ext_buffer {
+	unsigned int id;	/* sequence id assigned at enable time (for logs) */
+	ext_state_t status;
+	size_t offset;		/* next free byte within buffer */
+	size_t invalid;		/* bytes invalidated since exhaustion */
+	char *buffer;		/* vmalloc'd storage, allocated lazily */
+};
+
+/* Global state; all access is expected to be serialized by the logger's
+ * log.mutex (see function comments below) — there is no local locking. */
+static struct ext_buffer g_ext[LOGGER_EXT_BUFFER_COUNT] = {0, };
+static unsigned int g_idx = 0;		/* buffer currently being filled */
+static unsigned int g_sequencer = 0;	/* id generator for ext_enable() */
+
+/*
+ * ext_get_buffer - reserve 'size' bytes in 'ext' and return the offset of
+ * the reservation, or -1 on failure (disabled/exhausted buffer, or OOM).
+ *
+ * Storage is vmalloc'd lazily on first use. Note the ordering: the offset
+ * is advanced first and the bound checked afterwards, so a reservation that
+ * would cross (or exactly reach) the end of the buffer is discarded and the
+ * buffer is marked exhausted instead.
+ */
+static int ext_get_buffer(struct ext_buffer *ext, size_t size)
+{
+	int r;
+
+	if (!ext)
+		return -1;
+
+	if (ext->status != EXT_STATE_ENABLE)
+		return -1;
+
+	if (!ext->buffer) {
+		char *buf = vmalloc_node(LOGGER_EXT_BUFFER_SIZE, NUMA_EMEM_VNODE);
+		if (!buf) {
+			pr_err("[%u] Failed to allocate memory!\n", ext->id);
+			return -1;
+		}
+		ext->buffer = buf;
+		pr_info("[%u] Allocate memory (%p)\n", ext->id, ext->buffer);
+	}
+
+	r = (int)ext->offset;	/* fits in int: buffer is only 16 MB */
+	ext->offset += size;
+
+	if (ext->offset >= LOGGER_EXT_BUFFER_SIZE) {
+		ext->status = EXT_STATE_ENABLE_EXHAUSTED;
+		pr_info("[%u] Use all extended buffer (%p)\n", ext->id, ext->buffer);
+		return -1;
+	}
+
+	return r;
+}
+
+/*
+ * ext_enable - reset 'ext' and move it DISABLE -> ENABLE.
+ *
+ * Allocation is deferred to ext_get_buffer(); only bookkeeping happens here.
+ * Returns false if 'ext' is NULL or not currently disabled.
+ */
+static bool ext_enable(struct ext_buffer *ext)
+{
+	if (!ext)
+		return false;
+
+	if (ext->status != EXT_STATE_DISABLE)
+		return false;
+
+	ext->buffer = NULL;
+	ext->id = g_sequencer++;
+	ext->offset = 0;
+	ext->invalid = 0;
+	ext->status = EXT_STATE_ENABLE;
+
+	pr_info("[%u] Enable extended buffer\n", ext->id);
+	return true;
+}
+
+/*
+ * ext_disable - free the storage of 'ext' and move it back to DISABLE.
+ *
+ * Returns true even when already disabled (idempotent); false only for NULL.
+ */
+static bool ext_disable(struct ext_buffer *ext)
+{
+	if (!ext)
+		return false;
+
+	if (ext->status == EXT_STATE_DISABLE) {
+		pr_err("[%u] Already disabled!\n", ext->id);
+		return true;
+	}
+
+	if (ext->buffer)
+		vfree(ext->buffer);
+	ext->buffer = NULL;
+	ext->offset = 0;
+	ext->invalid = 0;
+	ext->status = EXT_STATE_DISABLE;
+
+	pr_info("[%u] Disable extended buffer\n", ext->id);
+	return true;
+}
+
+/*
+ * ext_invalidate - account 'invalid' bytes against an exhausted buffer and
+ * disable (free) it once the running total reaches 'invalid_max'.
+ *
+ * Only EXHAUSTED buffers take part; active or disabled ones return false.
+ */
+static bool ext_invalidate(struct ext_buffer *ext, size_t invalid, size_t invalid_max)
+{
+	if (!ext)
+		return false;
+
+	if (ext->status != EXT_STATE_ENABLE_EXHAUSTED)
+		return false;
+
+	ext->invalid += invalid;
+	if (ext->invalid < invalid_max)
+		return false;
+
+	return ext_disable(ext);
+}
+
+/*
+ * logger_ext_get_buffer - Get the offset of extended buffer
+ *
+ * Tries the current buffer (g_idx) first and rotates to the next one on
+ * failure; the returned offset is global, i.e. biased by the buffer index
+ * so logger_ext_get_address() can recover both index and local offset.
+ * Returns -1 when no buffer can satisfy the reservation.
+ *
+ * The caller needs to hold log.mutex.
+ */
+int logger_ext_get_buffer(size_t size)
+{
+	unsigned int i;
+	int r;
+
+	for (i = 0; i < LOGGER_EXT_BUFFER_COUNT; i++) {
+		r = ext_get_buffer(&g_ext[g_idx], size);
+		if (r >= 0)
+			return (r + (g_idx * LOGGER_EXT_BUFFER_SIZE));
+
+		g_idx = (g_idx + 1) % LOGGER_EXT_BUFFER_COUNT;
+	}
+
+	return -1;
+}
+
+/*
+ * logger_ext_get_address - Get the address of extended buffer from offset
+ *
+ * 'offset' is the global offset returned by logger_ext_get_buffer().
+ * Returns NULL for an out-of-range offset or when the target buffer has
+ * been freed in the meantime (e.g. via logger_ext_disable()) — previously
+ * the latter case computed an offset from a NULL base pointer.
+ *
+ * The caller needs to hold log.mutex.
+ */
+char *logger_ext_get_address(int offset)
+{
+	int idx = offset / LOGGER_EXT_BUFFER_SIZE;
+	int off = offset % LOGGER_EXT_BUFFER_SIZE;
+
+	if (idx >= 0 && idx < LOGGER_EXT_BUFFER_COUNT && g_ext[idx].buffer)
+		return g_ext[idx].buffer + off;
+
+	return NULL;
+}
+
+/*
+ * logger_ext_enable - Enable extended buffer
+ *
+ * Enables the first buffer (starting at g_idx) that is currently disabled;
+ * returns false when every buffer is already active.
+ *
+ * The caller needs to hold log.mutex.
+ */
+bool logger_ext_enable(void)
+{
+	unsigned int i;
+	unsigned int idx = g_idx;
+
+	for (i = 0; i < LOGGER_EXT_BUFFER_COUNT; i++) {
+		if (ext_enable(&g_ext[idx]))
+			return true;
+		idx = (idx + 1) % LOGGER_EXT_BUFFER_COUNT;
+	}
+
+	return false;
+}
+
+/*
+ * logger_ext_disable - Disable extended buffer
+ *
+ * Disables (and frees) every buffer; the result is true if any call
+ * reported success. Note ext_disable() also returns true for a buffer
+ * that was already disabled.
+ *
+ * The caller needs to hold log.mutex.
+ */
+bool logger_ext_disable(void)
+{
+	unsigned int i;
+	unsigned int idx = g_idx;
+	bool ret = false;
+
+	for (i = 0; i < LOGGER_EXT_BUFFER_COUNT; i++) {
+		ret |= ext_disable(&g_ext[idx]);
+		idx = (idx + 1) % LOGGER_EXT_BUFFER_COUNT;
+	}
+
+	return ret;
+}
+
+/*
+ * logger_ext_invalidate
+ *
+ * For all extended buffers, update a invalidation offset and check whether it can be disabled.
+ * Returns true if at least one exhausted buffer got disabled as a result.
+ * The caller needs to hold log.mutex.
+ */
+bool logger_ext_invalidate(size_t invalid, size_t invalid_max)
+{
+	unsigned int i;
+	unsigned int idx = g_idx;
+	bool ret = false;
+
+	for (i = 0; i < LOGGER_EXT_BUFFER_COUNT; i++) {
+		ret |= ext_invalidate(&g_ext[idx], invalid, invalid_max);
+		idx = (idx + 1) % LOGGER_EXT_BUFFER_COUNT;
+	}
+
+	return ret;
+}
+
--- /dev/null
+#include "logger_filter.h"
+#include <linux/slab.h>
+#include <linux/err.h>
+
+/**
+ * logger_tagname_registry_new() - create a new name registry
+ *
+ * Return: a new logger_tagname_registry on success, ERR_PTR on failure.
+ */
+struct logger_tagname_registry *logger_tagname_registry_new(void)
+{
+	struct logger_tagname_registry *r;
+
+	r = kmalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		return ERR_PTR(-ENOMEM);
+
+	hash_init(r->entries_hash);
+	r->name_seq_last = 0;
+
+	return r;
+}
+
+/**
+ * logger_tagname_registry_free() - free name registry
+ * @r: name registry to free, or NULL
+ *
+ * Free a name registry and cleanup all internal objects. This is a no-op if
+ * you pass NULL as registry.
+ *
+ * Note: only the registry itself is freed; the table is expected to be
+ * empty already (WARN_ON fires otherwise — entries would leak).
+ */
+void logger_tagname_registry_free(struct logger_tagname_registry *r)
+{
+	if (!r)
+		return;
+
+	WARN_ON(!hash_empty(r->entries_hash));
+	kfree(r);
+}
+
+/**
+ * logger_strhash - calculate a hash
+ * @str: String
+ *
+ * Simple non-cryptographic string hash. NOTE(review): lookups in logger.c
+ * key the registry by entry->hash, so the writer side presumably uses this
+ * same function — keep the two in sync if it is ever changed.
+ *
+ * Return: hash value
+ */
+unsigned int logger_strhash(const char *str)
+{
+	unsigned long hash = 0;
+
+	while (*str) {
+		hash += (*str << 4) + (*str >> 4);
+		hash *= 11;
+		str++;
+	}
+
+	return (unsigned int)hash;
+}
--- /dev/null
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "logger: " fmt
+/* MAX_LOG: bytes of a bad write dumped in print_warn_info(); N_FD: how many
+ * of the writer's fds get their paths logged there. */
+#define MAX_LOG 129
+#define N_FD 3
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <linux/uio.h>
+#include <linux/device.h>
+#include <linux/fdtable.h>
+#include <linux/compat.h>
+#include <linux/cred.h>
+#include <linux/version.h>
+/* The KERNEL_VERSION macro check is added to maintain build
+ * compatibility with old versions of VD kernel.
+ * This macro is defined in <linux/version.h>
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+/* The below headers are included to fix build errors of type
+ * "implicit-function-declaration" when build with kernel 5.4
+ */
+#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
+#endif
+#include "logger.h"
+#include "logger_policy.h"
+#include "logger_filter.h"
+#include "logger_stats.h"
+#include "logger_ext.h"
+#include "vlogger.h"
+
+#ifndef NUMA_EMEM_VNODE
+#define NUMA_EMEM_VNODE 0
+#endif
+
+#define LOGGER_NR_SEGS (3)
+#define ENABLE_ONE_BUFF (7)
+#define MAGIC_EXT (0xFEFE)
+
+/* WARNING: this macro hides a 'return -EFAULT' — it must only be used
+ * inside functions returning ssize_t/int where that early return is safe. */
+#define LOGGER_GET_IOV_DATA(p, l, v, n) { \
+	if (copy_from_user(p, (void __user *)v->iov[n].iov_base, min_t(size_t, l, v->iov[n].iov_len))) \
+		return -EFAULT; \
+}
+
+/* Size of the header a reader of ABI version 'v' expects (v1 vs v2 layout). */
+#define get_user_hdr_len(v) \
+	(v < 2 ? sizeof(struct user_logger_entry_compat) : sizeof(struct logger_entry))
+
+/**
+ * struct logger_log - provide data for managing log buffer
+ * @buffer: The actual ring buffer
+ * @readers: This log's readers
+ * @mutex: The mutex that protects the @buffer
+ * @w_off: The current write head offset
+ * @head: The head, or location that readers start reading at.
+ * @size: The size of the log
+ * @eot: End-of-Tape, The offset of beginning of margin.
+ * @miscs: The "misc" devices representing the log
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+	unsigned char *buffer;
+	struct list_head readers;
+	struct mutex mutex;
+	size_t w_off;
+	size_t head;
+	size_t size;
+	size_t eot;
+	struct miscdevice miscs[NR_LOGGER_IDS];
+};
+
+/*
+ * struct logger_pid_list - Data set to filter pid and its nos
+ * @no_of_pids: Total no of pids for filtering
+ * @pid_list: The list of all pids for filtering
+*/
+struct logger_pid_list{
+	int no_of_pids;
+	pid_t *pid_list;
+};
+
+#ifdef CONFIG_COMPAT
+/* 32-bit userspace layout of struct logger_pid_list (pointer as compat_uptr_t). */
+struct logger_pid_list_compat {
+	int no_of_pids;
+	compat_uptr_t pid_list;
+};
+#endif
+
+/*
+ * struct logger_backend_filter - Data set to filter log on backend side
+ * @global_pri: The priority defined globally
+ * @tag_names: The list of all tag names for each reader
+ * @pid_filter_list: The pids the reader restricts itself to (empty = all)
+*/
+struct logger_backend_filter {
+	log_priority global_pri;
+	struct logger_tagname_registry *tag_names;
+	struct logger_pid_list pid_filter_list;
+};
+
+/**
+ * struct logger_writer - a logging device open for writing
+ * @owner: The owner task of writer
+ * @prio: Default message priority value
+ * @tag: The tag to be attached to messages
+ * @tag_len: The length of the tag
+ */
+struct logger_writer {
+	struct task_struct *owner;
+	int prio;
+	size_t tag_len;
+	char tag[DLOG_MAX_TAGNAME + 1];
+};
+
+/**
+ * struct logger_reader - a logging device open for reading
+ * @list: The associated entry in the global logger_log's @readers list
+ * @r_off: The current read head offset.
+ * @r_all: Reader can read all entries
+ * @r_ver: Reader ABI version
+ * @wq: The wait queue for @reader
+ * @logger_ids: Mask of interested buffer ids
+ * @filter_data: Data to filter on backend
+ * @pid: The pid of reader
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log.mutex.
+ */
+struct logger_reader {
+	struct list_head list;
+	size_t r_off;
+	bool r_all;
+	int r_ver;
+	wait_queue_head_t wq;
+	unsigned long logger_ids;
+	struct logger_backend_filter filter_data;
+	pid_t pid;
+};
+
+/* Per-open-file state hung off file->private_data. */
+struct logger_pdata {
+	int logger_id;
+	void *reader;
+	void *writer;
+	void *policy;
+};
+
+/* In-ring placeholder that redirects to the extended buffer; recognized by
+ * its MAGIC_EXT magic (see get_ext_entry()). */
+struct logger_ext_entry {
+	__u16 magic;
+	__s32 offset;
+};
+
+/* The single global log; all buffers share this ring (see @miscs). */
+static struct logger_log log;
+
+static int g_plog_enable = 0;
+module_param_named(plog_enable, g_plog_enable, int, 0644);
+
+/*
+ * get_ext_entry - return the entry located at the offset if it is extended.
+ *
+ * An extended entry is recognized by its MAGIC_EXT marker. NOTE(review):
+ * the NULL test below is vestigial — log.buffer + off cannot be NULL once
+ * the ring is allocated; the magic check is what matters.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static inline struct logger_ext_entry *get_ext_entry(size_t off)
+{
+	struct logger_ext_entry *ext = (struct logger_ext_entry *)(log.buffer + off);
+
+	if (ext == NULL || ext->magic != MAGIC_EXT)
+		return NULL;
+
+	return ext;
+}
+
+/*
+ * get_entry - return the entry located at the offset.
+ *
+ * Follows the indirection to the extended buffer when the slot holds an
+ * ext placeholder; otherwise the entry lives in the ring itself.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static inline struct logger_entry *get_entry(size_t off)
+{
+	struct logger_ext_entry *ext = get_ext_entry(off);
+
+	if (ext)
+		return (struct logger_entry *)logger_ext_get_address(ext->offset);
+
+	return (struct logger_entry *)(log.buffer + off);
+}
+
+/*
+ * copy_header_to_user - copy 'entry''s header to 'buf' in the layout the
+ * reader's ABI version 'ver' expects, storing the copied size in *hdr_len.
+ *
+ * For v1 readers the entry is down-converted to user_logger_entry_compat
+ * (euid/hash/logger_id are dropped); v2+ readers get the header verbatim.
+ * Returns the number of bytes NOT copied (copy_to_user semantics), i.e.
+ * 0 on success.
+ */
+static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
+				   char __user *buf, size_t *hdr_len)
+{
+	void *hdr;
+	struct user_logger_entry_compat v1;
+
+	if (ver < 2) {
+		v1.len = entry->len;
+		v1.__pad = 0;
+		v1.pid = entry->pid;
+		v1.tid = entry->tid;
+		v1.sec = entry->sec;
+		v1.nsec = entry->nsec;
+		hdr = &v1;
+		*hdr_len = sizeof(struct user_logger_entry_compat);
+	} else {
+		hdr = entry;
+		*hdr_len = sizeof(struct logger_entry);
+	}
+
+	return copy_to_user(buf, hdr, *hdr_len);
+}
+
+/*
+ * get_next_entry - return the offset of the next entry.
+ *
+ * An extended entry occupies only its placeholder in the ring (its payload
+ * lives in the extended buffer); a plain entry occupies header + payload.
+ * Wraps to 0 when the end-of-tape margin is reached.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static size_t get_next_entry(size_t off)
+{
+	if (get_ext_entry(off))
+		off += sizeof(struct logger_ext_entry);
+	else
+		off += (sizeof(struct logger_entry) + get_entry(off)->len);
+
+	if (off == log.eot)
+		off = 0;
+
+	return off;
+}
+
+/*
+ * fix_up_offset - update the offset to the first valid entry at least 'len'
+ * bytes after 'off' and return the size of skipped entries.
+ *
+ * 'count' tracks ring-space consumed (placeholder size for ext entries);
+ * 'ret' tracks the logical size of the dropped entries, which callers feed
+ * into the OVERFLOW statistics. An offset past end-of-tape indicates ring
+ * corruption and is reported loudly before resetting to 0.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static size_t fix_up_offset(size_t *off, size_t len)
+{
+	size_t count = 0;
+	size_t ret = 0;
+
+	do {
+		if (*off >= log.eot) {
+			struct logger_reader *reader;
+			list_for_each_entry(reader, &log.readers, list) {
+				pr_err("r:%zu pid:%d", reader->r_off, (int)current->tgid);
+			}
+			pr_err("LOGGER_ERROR!! cnt:%zu len:%zu off:%zu h:%zu w:%zu e:%zu %s(%d)",
+				count, len, *off, log.head, log.w_off,
+				log.eot, current->comm, (int)current->tgid);
+			*off = 0;
+		}
+
+		if (get_ext_entry(*off))
+			count += sizeof(struct logger_ext_entry);
+		else
+			count += sizeof(struct logger_entry) + get_entry(*off)->len;
+
+		ret += sizeof(struct logger_entry) + get_entry(*off)->len;
+
+		*off = get_next_entry(*off);
+	} while (count < len);
+
+	return ret;
+}
+
+/*
+ * check_pid_filter - return true iff 'pid' appears in the reader's
+ * pid filter list. Linear scan; the list is expected to be short.
+ */
+static bool check_pid_filter(struct logger_pid_list *ptr, pid_t pid)
+{
+	int i = 0;
+
+	for(i = 0; i < ptr->no_of_pids; i++){
+		if(ptr->pid_list[i] == pid)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * do_match_log_filters - Filtering message by tag.
+ *
+ * If the tag and tag priority matches, it returns true.
+ * and it returns false if not.
+ *
+ * Message layout assumption: msg[0] is the priority byte and msg[1..] the
+ * NUL-terminated tag (matches the dlog writer format — the priority check
+ * below guards against entries that don't follow it).
+ * On any non-match the reader's r_off is advanced past the entry as a
+ * side effect, so callers must re-check r_off against w_off afterwards.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static bool do_match_log_filters(struct logger_reader *reader)
+{
+	struct logger_tagname_registry *r = reader->filter_data.tag_names;
+	struct logger_entry *entry = get_entry(reader->r_off);
+	log_priority pri = entry->msg[0];
+	char *tag = &entry->msg[1];
+	__u32 hashkey = entry->hash;
+
+	/* Check Tag priority */
+	if (pri < DLOG_UNKNOWN || pri > DLOG_SILENT) {
+		pr_info("Wrong message priority\n");
+		goto skip_entry;
+	}
+
+	if ((reader->filter_data.pid_filter_list.no_of_pids > 0)
+		&& !check_pid_filter(&reader->filter_data.pid_filter_list, entry->pid))
+		goto skip_entry;
+
+	/* DLOG_PRIO_MAX as global priority means "no filtering at all". */
+	if (reader->filter_data.global_pri == DLOG_PRIO_MAX)
+		return true;
+
+	/* Per-tag overrides take precedence over the global priority. */
+	if (r->name_seq_last) {
+		filter_info *filter;
+		hash_for_each_possible(r->entries_hash, filter, hentry, hashkey) {
+			if (strcmp(filter->tagname, tag))
+				continue;
+
+			if (pri >= filter->priority)
+				return true;
+			else
+				goto skip_entry;
+		}
+	}
+
+	if (pri >= reader->filter_data.global_pri)
+		return true;
+
+skip_entry:
+	/* Calculate offset for next entry */
+	reader->r_off = get_next_entry(reader->r_off);
+	return false;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Copies header (version-dependent layout) then payload, updates the READ
+ * statistics, and advances the reader past the entry. Returns -EFAULT on
+ * a failed user copy, leaving r_off untouched in that case.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_reader *reader, char __user *buf)
+{
+	struct logger_entry *entry;
+	size_t hdr_len;
+
+	/*
+	 * First, copy the header to userspace, using the version of
+	 * the header requested
+	 */
+	entry = get_entry(reader->r_off);
+	if (copy_header_to_user(reader->r_ver, entry, buf, &hdr_len))
+		return -EFAULT;
+
+	/*
+	 * Now, copy message to userspace.
+	 */
+	if (copy_to_user(buf + hdr_len, entry->msg, entry->len))
+		return -EFAULT;
+
+	logger_stats_update(LOGGER_STATS_READ, entry->logger_id, entry->len + sizeof(struct logger_entry), reader->pid);
+
+	reader->r_off = get_next_entry(reader->r_off);
+
+	return entry->len + hdr_len;
+}
+
+/*
+ * get_next_entry_by_uid - Starting at 'off', returns an offset into
+ * 'log buffer' which contains the first entry readable by 'euid'
+ *
+ * Returns log.w_off (i.e. "nothing to read") when no matching entry exists.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static size_t get_next_entry_by_uid(size_t off, kuid_t euid)
+{
+	while (off != log.w_off) {
+		struct logger_entry *entry = get_entry(off);
+
+		if (uid_eq(entry->euid, euid))
+			return off;
+
+		off = get_next_entry(off);
+	}
+	return off;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is written to
+ * - Atomically reads exactly one log entry
+ *
+ * If filter enabled, finds until there are matching log in the buffer.
+ * set errno to EINTR if reader can't find a tag that matches up to the end
+ * (i.e w_off == r_off)
+ *
+ * Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ *
+ * Locking pattern (delicate — keep in mind when modifying): every 'break'
+ * inside the while loop exits WITH log.mutex held and relies on the single
+ * mutex_unlock() at the end of the function; the 'continue' paths and the
+ * fall-through to the wait unlock it first.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *pos)
+{
+	struct logger_pdata *pdata = file->private_data;
+	struct logger_reader *reader = pdata->reader;
+	ssize_t ret = -EINTR;
+	DEFINE_WAIT(wait);
+
+	while (1) {
+		mutex_lock(&log.mutex);
+		/* restricted readers only see entries matching their euid */
+		if (!reader->r_all)
+			reader->r_off = get_next_entry_by_uid(
+				reader->r_off, current_euid());
+
+		if (log.w_off != reader->r_off) {
+			struct logger_entry *entry = get_entry(reader->r_off);
+
+			/* get the size of the next entry */
+			ret = get_user_hdr_len(reader->r_ver) + entry->len;
+			if (count < ret) {
+				ret = -EINVAL;
+				break;
+			}
+
+			/* skip entries from buffers this reader is not subscribed to */
+			if (test_bit(entry->logger_id, &reader->logger_ids) == 0) {
+				reader->r_off = get_next_entry(reader->r_off);
+				if (log.w_off == reader->r_off) {
+					ret = -EINTR;
+					break;
+				}
+				mutex_unlock(&log.mutex);
+				continue;
+			}
+
+			/* Filter matching if backend filter is enabled */
+			if (get_backend_logfilter_enable()) {
+				if (!do_match_log_filters(reader)) {
+					/* When there are no more messages to read */
+					if (log.w_off == reader->r_off) {
+						ret = -EINTR;
+						break;
+					}
+					mutex_unlock(&log.mutex);
+					continue;
+				}
+			}
+
+			/* get exactly one entry from the log */
+			ret = do_read_log_to_user(reader, buf);
+			break;
+		}
+		mutex_unlock(&log.mutex);
+
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (signal_pending(current))
+			return -EINTR;
+
+		prepare_to_wait(&reader->wq, &wait, TASK_INTERRUPTIBLE);
+		schedule();
+		finish_wait(&reader->wq, &wait);
+	}
+
+	mutex_unlock(&log.mutex);
+	return ret;
+}
+
+/*
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ * positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ *               c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ *        c^
+ *  or                    c^
+ */
+static inline int is_between(size_t a, size_t b, size_t c)
+{
+	if (a < b) {
+		/* is c between a and b? */
+		if (a < c && c <= b)
+			return 1;
+	} else {
+		/* is c outside of b through a? */
+		if (c <= b || a < c)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * Lapped readers have their dropped bytes recorded as OVERFLOW stats.
+ * log.size must be a power of two for the '& (log.size - 1)' wrap below.
+ *
+ * The caller needs to hold log.mutex.
+ */
+static void fix_up_readers(size_t len, int logger_id)
+{
+	size_t old = log.w_off;
+	size_t new = (old + len) & (log.size - 1); /* 2^n modulo */
+	struct logger_reader *reader;
+
+	if (is_between(old, new, log.head))
+		fix_up_offset(&log.head, len);
+
+	/* If offset is equal to new EoT, reset it to 0 */
+	if (new < old && old == log.head)
+		log.head = 0;
+
+	list_for_each_entry(reader, &log.readers, list) {
+		if (is_between(old, new, reader->r_off)) {
+			size_t dropped = fix_up_offset(&reader->r_off, len);
+			logger_stats_update(LOGGER_STATS_OVERFLOW, logger_id, dropped, reader->pid);
+		}
+
+		/* If offset is equal to new EoT, reset it to 0 */
+		if (new < old && old == reader->r_off)
+			reader->r_off = 0;
+	}
+}
+
+/*
+ * print_warn_info - diagnostic dump for a malformed write: log the start of
+ * the offending payload and the caller's open file descriptors so the
+ * misbehaving process can be identified.
+ */
+static void print_warn_info(struct iov_iter *from)
+{
+	int i;
+	char unformatstring[MAX_LOG] = {0,};
+	struct fdtable *fdt = NULL;
+	struct files_struct *files_p = NULL;
+	struct file *file = NULL;
+	char *path = NULL;
+	/* clamp to MAX_LOG - 1: copying a full MAX_LOG payload would leave
+	 * the buffer without a NUL terminator and the "%s" below would read
+	 * past the end of the array */
+	int log_length = min_t(size_t, MAX_LOG - 1, from->iov[0].iov_len);
+
+	if (copy_from_user(unformatstring, from->iov[0].iov_base, log_length))
+		pr_err("Illegal write by process : %s\n", current->comm);
+	else
+		pr_err("Illegal write by process : %s, String : %s\n", current->comm, unformatstring);
+
+	/* dump up to min(N_FD, max_fds) descriptors of the current task */
+	files_p = current->files;
+	if (files_p) {
+		int min_fd;
+
+		spin_lock(&files_p->file_lock);
+		fdt = files_fdtable(files_p);
+		min_fd = N_FD < fdt->max_fds ? N_FD : fdt->max_fds;
+
+		for (i = 0; i < min_fd; i++) {
+			char tmp[256] = {0,};
+
+			file = fdt->fd[i];
+			if (!file)
+				continue;
+			path = d_path(&file->f_path, tmp, sizeof(tmp));
+			if (IS_ERR(path)) {
+				pr_err("Error parsing path of FD : %d, error :%ld", i, PTR_ERR(path));
+				continue;
+			}
+			pr_err("FD : %d ----> %s\n", i, path);
+		}
+		spin_unlock(&files_p->file_lock);
+	}
+}
+
+/*
+ * logger_write_iter - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct logger_pdata *pdata = iocb->ki_filp->private_data;
+	struct logger_policy *policy = pdata->policy;
+	struct logger_writer *writer = pdata->writer;
+	unsigned long nr_segs = from->nr_segs;
+	struct logger_entry header;
+	size_t count, w_off;
+	u64 ts;
+	unsigned long nsec;
+	size_t entry_len, fix_up_len;
+	struct logger_reader *reader;
+	int ret;
+	uint8_t priority = DLOG_UNKNOWN;
+	char tag[DLOG_MAX_TAGNAME + 1] = {};
+	size_t tag_size;
+	size_t msg_size;
+	unsigned char *buffer;
+	struct logger_ext_entry ext;
+	void __user * msg;
+
+	if (!g_plog_enable)
+		return -EPERM;
+
+	/* writev() layout: iov[0] = one priority byte, iov[1] = tag,
+	 * last iov = message. A single segment is a plain write() using
+	 * the writer's sticky prio/tag (LOGGER_SET_PRIO / LOGGER_SET_TAG).
+	 * Anything else is malformed: dump diagnostics and reject. */
+	if (nr_segs == LOGGER_NR_SEGS && from->iov[0].iov_len == 1) {
+		LOGGER_GET_IOV_DATA(&priority, sizeof(priority), from, 0);
+		LOGGER_GET_IOV_DATA(tag, DLOG_MAX_TAGNAME, from, 1);
+	} else if (nr_segs == 1) {
+		priority = (uint8_t)writer->prio;
+		strncpy(tag, writer->tag, sizeof(tag));
+	} else {
+		print_warn_info(from);
+		return -EINVAL;
+	}
+
+	/* payload = priority byte + NUL-terminated tag + message */
+	tag_size = strlen(tag) + 1;
+	msg = from->iov[nr_segs-1].iov_base;
+	msg_size = min_t(size_t, from->iov[nr_segs-1].iov_len, LOGGER_ENTRY_MAX_PAYLOAD - tag_size - 1);
+	count = msg_size + tag_size + 1;
+
+	/* null writes succeed, return zero */
+	if (unlikely(!msg_size))
+		return 0;
+
+	/* check logger policy */
+	ret = logger_policy_check(policy, priority, tag);
+	if (ret < 0) {
+		/* only qos drops (-EDQUOT) are accounted in the stats;
+		 * policy rejections still "succeed" from userspace's view */
+		if (ret == -EDQUOT)
+			logger_stats_update(LOGGER_STATS_DROP, pdata->logger_id, count + sizeof(struct logger_entry), current->tgid);
+		return 0;
+	}
+
+	header.pid = current->tgid;
+	header.tid = current->pid;
+	header.euid = current_euid();
+	header.len = count;
+	header.logger_id = pdata->logger_id;
+
+	/* tag hash lets readers filter without re-hashing on every entry */
+	if (get_backend_logfilter_enable())
+		header.hash = logger_strhash(tag);
+
+	mutex_lock(&log.mutex);
+
+	/* lazily allocate the ring buffer on the first write */
+	if (unlikely(log.buffer == NULL))
+	{
+		log.buffer = vmalloc_node(log.size, NUMA_EMEM_VNODE);
+		if (log.buffer == NULL)
+		{
+			mutex_unlock(&log.mutex);
+			return -ENOMEM;
+		}
+	}
+
+	/* timestamp under the mutex so entries are time-ordered in the buffer */
+	ts = local_clock();
+	nsec = do_div(ts, 1000000000);
+	header.sec = ts;
+	header.nsec = nsec;
+	entry_len = sizeof(struct logger_entry) + count;
+	/* large entries may be redirected into the extended buffer; the ring
+	 * then only stores a small logger_ext_entry stub pointing at it */
+	ext.offset = logger_ext_get_buffer(entry_len);
+	if (ext.offset >= 0) {
+		ext.magic = MAGIC_EXT;
+		entry_len = sizeof(struct logger_ext_entry);
+	}
+	w_off = log.w_off;
+
+	/*
+	 * Check whether left room is enough for commit.
+	 * If it's not, then skip it and start from offset 0.
+	 */
+	fix_up_len = entry_len;
+	if (unlikely(entry_len > log.size - w_off)) {
+		fix_up_len += log.size - w_off;
+		w_off = 0;
+	}
+
+	/*
+	 * Fix up any readers, pulling them forward to the first readable
+	 * entry after (what will be) the new write offset. We do this now
+	 * because if we partially fail, we can end up with clobbered log
+	 * entries that encroach on readable buffer.
+	 */
+	fix_up_readers(fix_up_len, header.logger_id);
+
+	buffer = log.buffer + w_off;
+	if (ext.offset >= 0) {
+		/* stub goes in the ring; the real entry goes to the ext buffer */
+		memcpy(buffer, &ext, sizeof(struct logger_ext_entry));
+		buffer = logger_ext_get_address(ext.offset);
+	}
+
+	memcpy(buffer, &header, sizeof(struct logger_entry));
+	buffer += sizeof(struct logger_entry);
+
+	*buffer++ = priority;
+
+	memcpy(buffer, tag, tag_size);
+	buffer += tag_size;
+
+	/* w_off not yet advanced, so a failed copy leaves no visible entry */
+	if (copy_from_user(buffer, msg, msg_size)) {
+		mutex_unlock(&log.mutex);
+		return -EFAULT;
+	}
+	w_off += entry_len;
+
+	/* Update End-of-Tape*/
+	if (w_off < log.w_off)
+		log.eot = log.w_off;
+	else if (w_off > log.eot)
+		log.eot = w_off;
+
+	log.w_off = (w_off == log.eot ? 0 : w_off);
+
+	/* disable extended buffer if it is not used anymore */
+	logger_ext_invalidate(entry_len, log.size);
+
+	/* wake up any blocked readers */
+	list_for_each_entry(reader, &log.readers, list) {
+		if (test_bit(header.logger_id, &reader->logger_ids) != 0)
+			wake_up_interruptible(&reader->wq);
+	}
+
+	mutex_unlock(&log.mutex);
+
+	logger_stats_update(LOGGER_STATS_WRITE, header.logger_id, count + sizeof(struct logger_entry), current->tgid);
+
+	return from->count;
+}
+
+/*
+ * logger_open - the log's open() file operation
+ */
+
+/* Undo reader setup when a later stage of open() fails: unlink the reader
+ * from log.readers (the previous code freed it while still on the list —
+ * a use-after-free on the next write wakeup) and release its still-empty
+ * tag-name registry before freeing it. */
+static void logger_open_put_reader(struct logger_reader *reader)
+{
+	if (!reader)
+		return;
+
+	mutex_lock(&log.mutex);
+	list_del(&reader->list);
+	mutex_unlock(&log.mutex);
+
+	if (reader->filter_data.tag_names)
+		logger_tagname_registry_free(reader->filter_data.tag_names);
+
+	kfree(reader);
+}
+
+static int logger_open(struct inode *inode, struct file *file)
+{
+	struct logger_pdata *pdata;
+	const char *fname;
+
+	nonseekable_open(inode, file);
+
+	pdata = kzalloc(sizeof(struct logger_pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	/* route by device name; NOTE(review): strncmp() is a prefix match, so
+	 * no device name may be a prefix of another — confirm the constants */
+	fname = file->f_path.dentry->d_name.name;
+	if (!strncmp(fname, LOGGER_LOG_MAIN, strlen(LOGGER_LOG_MAIN)))
+		pdata->logger_id = LOGGER_ID_MAIN;
+	else if (!strncmp(fname, LOGGER_LOG_SYSTEM, strlen(LOGGER_LOG_SYSTEM)))
+		pdata->logger_id = LOGGER_ID_SYSTEM;
+	else if (!strncmp(fname, LOGGER_LOG_EVENTS, strlen(LOGGER_LOG_EVENTS)))
+		pdata->logger_id = LOGGER_ID_EVENTS;
+	else if (!strncmp(fname, LOGGER_LOG_RADIO, strlen(LOGGER_LOG_RADIO)))
+		pdata->logger_id = LOGGER_ID_RADIO;
+	else {
+		kfree(pdata);
+		return -EINVAL;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader;
+
+		reader = kzalloc(sizeof(struct logger_reader), GFP_KERNEL);
+		if (!reader) {
+			kfree(pdata);
+			return -ENOMEM;
+		}
+
+		reader->r_ver = 1;
+#if defined(CHECK_READ_PRIVILEGE)
+		/* unprivileged readers only see entries matching their euid */
+		reader->r_all = in_egroup_p(inode->i_gid) || capable(CAP_SYSLOG);
+#else
+		reader->r_all = 1;
+#endif
+		reader->pid = current->tgid;
+		/* by default a reader only sees the buffer it opened */
+		set_bit(pdata->logger_id, &reader->logger_ids);
+
+		init_waitqueue_head(&reader->wq);
+		INIT_LIST_HEAD(&reader->list);
+		reader->filter_data.global_pri = DLOG_PRIO_MAX;
+		if (get_backend_logfilter_enable()) {
+			reader->filter_data.tag_names = logger_tagname_registry_new();
+			if (IS_ERR(reader->filter_data.tag_names)) {
+				int ret = PTR_ERR(reader->filter_data.tag_names);
+				reader->filter_data.tag_names = NULL;
+				kfree(reader);
+				kfree(pdata);
+				return ret;
+			}
+		}
+
+		reader->filter_data.pid_filter_list.no_of_pids = 0;
+		reader->filter_data.pid_filter_list.pid_list = NULL;
+
+		/* start reading from the current head */
+		mutex_lock(&log.mutex);
+		reader->r_off = log.head;
+		list_add_tail(&reader->list, &log.readers);
+		mutex_unlock(&log.mutex);
+
+		pdata->reader = reader;
+	}
+
+	if (file->f_mode & FMODE_WRITE) {
+		struct logger_policy *policy;
+		struct logger_writer *writer;
+
+		writer = kzalloc(sizeof(struct logger_writer), GFP_KERNEL);
+		if (!writer) {
+			logger_open_put_reader(pdata->reader);
+			kfree(pdata);
+			return -ENOMEM;
+		}
+		/* defaults for plain write(): INFO priority, "STDOUT" tag */
+		writer->owner = current->group_leader;
+		writer->prio = DLOG_INFO;
+		writer->tag_len = 6;
+		strncpy(writer->tag, "STDOUT", DLOG_MAX_TAGNAME);
+		pdata->writer = writer;
+
+		policy = logger_policy_get();
+		if (IS_ERR(policy)) {
+			logger_open_put_reader(pdata->reader);
+			kfree(pdata->writer);
+			kfree(pdata);
+			return PTR_ERR(policy);
+		}
+
+		pdata->policy = policy;
+	}
+
+	file->private_data = pdata;
+
+	return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+	struct logger_pdata *pdata = file->private_data;
+
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = pdata->reader;
+		struct logger_tagname_registry *r = reader->filter_data.tag_names;
+
+		/* unlink under log.mutex so writers can no longer wake us */
+		mutex_lock(&log.mutex);
+		list_del(&reader->list);
+
+		/* free all tag filters installed via LOGGER_SET_TAG_FILTER_INFO */
+		if (r && get_backend_logfilter_enable()) {
+			filter_info *filter;
+			struct hlist_node *tmp;
+			int bkt;
+
+			hash_for_each_safe(r->entries_hash, bkt, tmp, filter, hentry) {
+				hash_del(&filter->hentry);
+				if (filter->tagname)
+					kfree(filter->tagname);
+				kfree(filter);
+			}
+
+			logger_tagname_registry_free(r);
+		}
+		mutex_unlock(&log.mutex);
+
+		kfree(reader);
+	}
+
+	if (file->f_mode & FMODE_WRITE) {
+		/* drop this writer's reference on the per-process policy */
+		logger_policy_put(pdata->policy);
+		kfree(pdata->writer);
+	}
+
+	kfree(file->private_data);
+
+	return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+	struct logger_pdata *pdata = file->private_data;
+	struct logger_reader *reader;
+	unsigned int ret = POLLOUT | POLLWRNORM;
+
+	if (!(file->f_mode & FMODE_READ))
+		return ret;
+
+	reader = pdata->reader;
+
+	/* this buffer was masked out via LOGGER_SET_DEV_ID: never readable */
+	if (test_bit(pdata->logger_id, &reader->logger_ids) == 0)
+		return ret;
+
+	poll_wait(file, &reader->wq, wait);
+
+	mutex_lock(&log.mutex);
+	/* restricted readers skip ahead to the next entry they may see */
+	if (!reader->r_all)
+		reader->r_off = get_next_entry_by_uid(
+			reader->r_off, current_euid());
+
+	if (log.w_off != reader->r_off)
+		ret |= POLLIN | POLLRDNORM;
+	mutex_unlock(&log.mutex);
+
+	return ret;
+}
+
+/* LOGGER_SET_VERSION: select the user-visible entry header format */
+static long logger_set_version(struct logger_reader *reader, void __user *arg)
+{
+	int new_ver;
+
+	if (copy_from_user(&new_ver, arg, sizeof(int)))
+		return -EFAULT;
+
+	/* only protocol versions 1 and 2 exist */
+	if (new_ver != 1 && new_ver != 2)
+		return -EINVAL;
+
+	reader->r_ver = new_ver;
+	return 0;
+}
+
+/* LOGGER_SET_GLOBAL_PRIORITY: set the reader's minimum priority for the
+ * backend filter; on any failure fall back to DLOG_SILENT (filter all). */
+static long logger_set_global_priority(struct logger_reader *reader, void __user *arg)
+{
+	if (!get_backend_logfilter_enable())
+		return -EPERM;
+
+	if (get_user(reader->filter_data.global_pri, (log_priority __user *)arg)) {
+		reader->filter_data.global_pri = DLOG_SILENT;
+		return -EFAULT;
+	}
+
+	/* valid priorities are [DLOG_UNKNOWN, DLOG_SILENT] */
+	if ((reader->filter_data.global_pri < DLOG_UNKNOWN
+		|| reader->filter_data.global_pri > DLOG_SILENT)) {
+		reader->filter_data.global_pri = DLOG_SILENT;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* LOGGER_SET_DEV_ID: replace the reader's buffer mask with the low bits of
+ * arg; at least one of the four buffers must be selected */
+static long logger_set_logger_ids(struct logger_reader *reader, int arg)
+{
+	unsigned long requested = (unsigned long)arg;
+	int id;
+
+	if (!(requested & 0xf))
+		return -EINVAL;
+
+	reader->logger_ids = 0;
+	for (id = 0; id < NR_LOGGER_IDS; id++) {
+		if (!test_bit(id, &requested))
+			continue;
+		set_bit(id, &reader->logger_ids);
+		pr_debug("%d log buffer is enabled", id);
+	}
+	/* mark that an explicit buffer selection was made */
+	set_bit(ENABLE_ONE_BUFF, &reader->logger_ids);
+	pr_info("Masking status of log buffer : %x", (int)reader->logger_ids);
+
+	return 0;
+}
+
+/* LOGGER_SET_TAG_FILTER_INFO: register one (tagname, priority) filter in
+ * the reader's tag registry. Entries are freed in logger_release(). */
+static long logger_set_tag_filter_info(struct logger_reader *reader, void __user *arg)
+{
+	void *tagname_ptr = NULL;
+	filter_info *karg;
+	log_priority prio = DLOG_UNKNOWN;
+	struct logger_tagname_registry *r;
+	__u32 hash = 0;
+
+	/* reject early, before touching userspace */
+	if (!get_backend_logfilter_enable())
+		return -EPERM;
+
+	/* the registry is only allocated in logger_open() when the backend
+	 * filter was already enabled; the 0644 module param can be flipped
+	 * afterwards, so guard against a NULL registry here */
+	r = reader->filter_data.tag_names;
+	if (!r)
+		return -EINVAL;
+
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		struct filter_args_compat_t cuarg;
+		if (copy_from_user(&cuarg, arg, sizeof(struct filter_args_compat_t)))
+			return -EFAULT;
+
+		tagname_ptr = compat_ptr(cuarg.tagname);
+		prio = cuarg.priority;
+	}
+	else
+#endif
+	{
+		struct filter_args_t uarg;
+		if (copy_from_user(&uarg, arg, sizeof(struct filter_args_t)))
+			return -EFAULT;
+
+		tagname_ptr = uarg.tagname;
+		prio = uarg.priority;
+	}
+
+	if (r->name_seq_last >= DLOG_MAX_TAGS)
+		return -EINVAL;
+
+	if (prio < DLOG_UNKNOWN || prio > DLOG_SILENT)
+		return -EINVAL;
+
+	karg = kzalloc(sizeof(filter_info), GFP_KERNEL);
+	if (!karg)
+		return -ENOMEM;
+
+	karg->tagname = strndup_user(tagname_ptr, DLOG_MAX_TAGNAME + 1);
+	if (IS_ERR(karg->tagname)) {
+		long ret = PTR_ERR(karg->tagname);
+		kfree(karg);
+		return ret;
+	}
+
+	/* blocking zero length-ed tag */
+	if (!strlen(karg->tagname)) {
+		kfree(karg->tagname);
+		kfree(karg);
+		return -EINVAL;
+	}
+
+	karg->priority = prio;
+	hash = logger_strhash(karg->tagname);
+	hash_add(r->entries_hash, &karg->hentry, hash);
+	r->name_seq_last++;
+
+	return 0;
+}
+
+/* LOGGER_SET_PID: replace the reader's pid filter list with one copied
+ * from userspace; an empty list clears the filter. */
+static long logger_set_pid(struct logger_reader *reader, void __user *arg)
+{
+	void *plist_ptr = NULL;
+	int plist_len = 0;
+	int n_pids = 0;
+	pid_t *kpids;
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		struct logger_pid_list_compat p_list;
+		plist_len = sizeof(struct logger_pid_list_compat);
+		if (copy_from_user(&p_list, arg, plist_len)) {
+			return -EFAULT;
+		}
+		plist_ptr = compat_ptr(p_list.pid_list);
+		n_pids = p_list.no_of_pids;
+	}
+	else
+#endif
+	{
+		struct logger_pid_list p_list;
+		plist_len = sizeof(struct logger_pid_list);
+		if (copy_from_user(&p_list, arg, plist_len)) {
+			return -EFAULT;
+		}
+		plist_ptr = p_list.pid_list;
+		n_pids = p_list.no_of_pids;
+	}
+
+	/* reject a negative count before it feeds an allocation size */
+	if (n_pids < 0)
+		return -EINVAL;
+
+	/* drop any previously installed list */
+	reader->filter_data.pid_filter_list.no_of_pids = 0;
+	if (reader->filter_data.pid_filter_list.pid_list) {
+		kfree(reader->filter_data.pid_filter_list.pid_list);
+		reader->filter_data.pid_filter_list.pid_list = NULL;
+	}
+
+	if (!n_pids)
+		return 0;
+
+	/* kmalloc_array() guards the count * size multiplication */
+	kpids = kmalloc_array(n_pids, sizeof(pid_t), GFP_KERNEL);
+	if (kpids == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(kpids, plist_ptr, (n_pids * sizeof(pid_t))) != 0) {
+		kfree(kpids);
+		return -EFAULT;
+	}
+
+	/* publish only after the list is fully populated: the previous code
+	 * left a dangling pid_list pointer (with a stale count) after a
+	 * failed copy, which do_match_log_filters() would then dereference */
+	reader->filter_data.pid_filter_list.pid_list = kpids;
+	reader->filter_data.pid_filter_list.no_of_pids = n_pids;
+
+	return 0;
+}
+
+/* LOGGER_GET_LOG_LEN: unread bytes between the reader's offset and the
+ * write head, accounting for wrap at the End-of-Tape marker */
+static long logger_get_log_len(struct logger_reader *reader)
+{
+	if (log.w_off >= reader->r_off)
+		return log.w_off - reader->r_off;
+
+	/* reader is ahead of the (wrapped) write head */
+	return (log.eot - reader->r_off) + log.w_off;
+}
+
+/* LOGGER_GET_NEXT_ENTRY_LEN: size (user header + payload) of the next
+ * entry this reader would receive, or 0 when it has caught up with the
+ * writer. Called with log.mutex held by logger_ioctl(). */
+static long logger_get_next_entry_len(struct logger_reader *reader)
+{
+	long ret = 0;
+
+	/* restricted readers skip entries belonging to other euids */
+	if (!reader->r_all)
+		reader->r_off = get_next_entry_by_uid(reader->r_off, current_euid());
+
+	if (log.w_off != reader->r_off)
+		ret = get_user_hdr_len(reader->r_ver) + get_entry(reader->r_off)->len;
+
+	return ret;
+}
+
+/* LOGGER_FLUSH_LOG: discard everything unread by moving the start head up
+ * to the current write offset; restricted to the device group or CAP_SYSLOG */
+static long logger_flush_log(struct file *file)
+{
+	bool allowed = in_egroup_p(file_inode(file)->i_gid) || capable(CAP_SYSLOG);
+
+	if (!allowed)
+		return -EPERM;
+
+	pr_info("Flush log by comm:%s", current->comm);
+	log.head = log.w_off;
+
+	return 0;
+}
+
+/* LOGGER_SET_PRIO: set the sticky priority used for plain write()s;
+ * the value is passed directly in the ioctl argument, not a pointer */
+static long logger_set_prio(struct logger_writer *writer, void __user *arg)
+{
+	int new_prio = (int)(uintptr_t)arg;
+
+	if (new_prio < DLOG_UNKNOWN || new_prio > DLOG_SILENT)
+		return -EINVAL;
+
+	writer->prio = new_prio;
+	return 0;
+}
+
+/* LOGGER_SET_TAG: install the sticky tag used for plain write()s.
+ * tag.len includes the trailing NUL supplied by userspace. */
+static long logger_set_tag(struct logger_writer *writer, void __user *arg)
+{
+	struct logger_tag tag;
+	size_t len;
+
+	if (copy_from_user(&tag, arg, sizeof(struct logger_tag)))
+		return -EFAULT;
+
+	/* len is unsigned: tag.len == 0 wraps to SIZE_MAX and is rejected
+	 * by the bounds check below */
+	len = tag.len - 1;
+	if (len > DLOG_MAX_TAGNAME)
+		return -EINVAL;
+
+	if (copy_from_user(writer->tag, (void*)(uintptr_t)tag.ptr, tag.len))
+		return -EFAULT;
+
+	/* force NUL termination regardless of what userspace sent */
+	writer->tag[len] = '\0';
+	writer->tag_len = len;
+
+	return 0;
+}
+
+/* logger_ioctl - dispatch reader-side and writer-side commands; a single
+ * log.mutex serialises all of them against readers and writers.
+ * Unrecognised commands (or commands for a mode the fd lacks) -> -EINVAL. */
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct logger_pdata *pdata = file->private_data;
+	long ret = -EINVAL;
+	void __user *argp = (void __user *)arg;
+
+	mutex_lock(&log.mutex);
+
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = pdata->reader;
+
+		switch (cmd) {
+		case LOGGER_GET_LOG_BUF_SIZE:
+			ret = log.size;
+			break;
+		case LOGGER_GET_LOG_LEN:
+			ret = logger_get_log_len(reader);
+			break;
+		case LOGGER_GET_NEXT_ENTRY_LEN:
+			ret = logger_get_next_entry_len(reader);
+			break;
+		case LOGGER_GET_VERSION:
+			ret = reader->r_ver;
+			break;
+		case LOGGER_SET_VERSION:
+			ret = logger_set_version(reader, argp);
+			break;
+		case LOGGER_SET_DEV_ID:
+			/* arg is a bitmask value, not a pointer */
+			ret = logger_set_logger_ids(reader, (int)arg);
+			break;
+		case LOGGER_GET_BACKEND_FILTER_STATE:
+			ret = get_backend_logfilter_enable();
+			break;
+		case LOGGER_SET_GLOBAL_PRIORITY:
+			ret = logger_set_global_priority(reader, argp);
+			break;
+		case LOGGER_SET_TAG_FILTER_INFO:
+			ret = logger_set_tag_filter_info(reader, argp);
+			break;
+		case LOGGER_SET_PID:
+			ret = logger_set_pid(reader, argp);
+			break;
+		case LOGGER_SET_EXTENDED_BUFFER:
+			ret = logger_ext_enable();
+			break;
+		}
+	}
+
+	if (file->f_mode & FMODE_WRITE) {
+		struct logger_writer *writer = pdata->writer;
+
+		switch (cmd) {
+		case LOGGER_FLUSH_LOG:
+			ret = logger_flush_log(file);
+			break;
+		case LOGGER_SET_PRIO:
+			ret = logger_set_prio(writer, argp);
+			break;
+		case LOGGER_SET_TAG:
+			ret = logger_set_tag(writer, argp);
+			break;
+		}
+	}
+
+	mutex_unlock(&log.mutex);
+
+	return ret;
+}
+
+/* file operations for the /dev/log_* misc devices; the same ioctl handler
+ * serves native and compat callers (compat pointers are unpacked per-command) */
+static const struct file_operations logger_fops = {
+	.owner = THIS_MODULE,
+	.read = logger_read,
+	.write_iter = logger_write_iter,
+	.poll = logger_poll,
+	.unlocked_ioctl = logger_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = logger_ioctl,
+#endif
+	.open = logger_open,
+	.release = logger_release,
+};
+
+/*
+ * logger_init - register the four log misc devices and bring up the policy,
+ * stats, extended-buffer and vlogger subsystems. Devices registered before
+ * a failure are deregistered again (the previous code leaked them).
+ */
+static int __init logger_init(void)
+{
+	char *device_name[] = {
+		[LOGGER_ID_MAIN] = LOGGER_LOG_MAIN,
+		[LOGGER_ID_EVENTS] = LOGGER_LOG_EVENTS,
+		[LOGGER_ID_RADIO] = LOGGER_LOG_RADIO,
+		[LOGGER_ID_SYSTEM] = LOGGER_LOG_SYSTEM,
+	};
+	int ret, i;
+
+	INIT_LIST_HEAD(&log.readers);
+	mutex_init(&log.mutex);
+	log.w_off = 0;
+	log.head = 0;
+	/* the ring buffer itself is allocated lazily on the first write */
+	log.size = log.eot = LOGGER_BUFFER_SIZE;
+
+	for (i = LOGGER_ID_MAIN; i < NR_LOGGER_IDS; i++) {
+		log.miscs[i].minor = MISC_DYNAMIC_MINOR;
+		log.miscs[i].name = device_name[i];
+		log.miscs[i].fops = &logger_fops;
+		log.miscs[i].mode = (S_IRUGO | S_IWUGO);
+#ifdef CONFIG_SECURITY_SMACK_SET_DEV_SMK_LABEL
+		/* was terminated with ',' (comma operator) — now a statement */
+		log.miscs[i].lab_smk64 = LOGGER_SMACK_LABEL;
+#endif
+		ret = misc_register(&log.miscs[i]);
+		if (unlikely(ret)) {
+			pr_err("failed to register misc device for log '%s'!\n", device_name[i]);
+			goto err_deregister;
+		}
+	}
+
+	ret = logger_policy_init();
+	if (unlikely(ret))
+		goto err_deregister;
+
+	/* NOTE(review): a stats init failure is reported via ret but does not
+	 * stop the remaining bring-up — preserved from the original flow */
+	ret = logger_stats_init();
+
+	logger_ext_enable();
+	vlogger_init();
+
+	return ret;
+
+err_deregister:
+	/* undo only the devices that registered successfully */
+	while (--i >= LOGGER_ID_MAIN)
+		misc_deregister(&log.miscs[i]);
+	return ret;
+}
+
+/* logger_exit - tear down subsystems in reverse of logger_init(), then
+ * deregister the misc devices and free the lazily-allocated ring buffer */
+static void __exit logger_exit(void)
+{
+	int i;
+
+	logger_ext_disable();
+	logger_stats_exit();
+	logger_policy_exit();
+	vlogger_exit();
+
+	for (i = LOGGER_ID_MAIN; i < NR_LOGGER_IDS; i++)
+		misc_deregister(&log.miscs[i]);
+
+	/* buffer may never have been allocated if nothing was written */
+	if (log.buffer)
+		vfree(log.buffer);
+}
+
+device_initcall(logger_init);
+module_exit(logger_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Love, <rlove@google.com>");
+MODULE_DESCRIPTION("Android Logger");
--- /dev/null
+/*
+ * logger_policy: A Logging Policy for Tizen TV.
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "logger: " fmt
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+
+#include "logger.h"
+#include "logger_policy.h"
+#include "logger_filter.h"
+
+/* tag filters toggled via LOGGER_POLICY_SET_FILTER; the count is read
+ * locklessly on the write fast path (check_policy_filter), hence atomic */
+static LIST_HEAD(policy_filter_list);
+static DEFINE_MUTEX(policy_filter_lock);
+static atomic_t policy_filter_count = ATOMIC_INIT(0);
+
+/* processes exempt from qos throttling; count == -1 means "never set" */
+static LIST_HEAD(policy_qos_bypass_list);
+static DEFINE_MUTEX(policy_qos_bypass_lock);
+static int policy_qos_bypass_count = -1;
+
+/* writers of the policy device; the last opener becomes the main controller
+ * allowed to change the qos limit */
+static LIST_HEAD(policy_qos_controller_list);
+static DEFINE_MUTEX(policy_qos_controller_lock);
+static int policy_qos_controller_count = 0;
+
+/* see logger_policy_check(): priority <= min must pass the tag filter;
+ * priorities strictly between min and max are subject to qos */
+static int policy_min_priority = 3;
+static int policy_max_priority = 6;
+static int backend_filter_enabled = 0;
+static int verbose_mode_enabled = 0;
+
+module_param_named(min_priority, policy_min_priority, int, 0644);
+module_param_named(max_priority, policy_max_priority, int, 0644);
+module_param_named(backend_filter, backend_filter_enabled, int, 0644);
+module_param_named(verbose_mode, verbose_mode_enabled, int, 0644);
+
+/* policy_status gates the whole policy machinery; policy_logger_enable
+ * gates logging entirely (both default to enabled) */
+static atomic_t policy_status = ATOMIC_INIT(1);
+static atomic_t policy_logger_enable = ATOMIC_INIT(1);
+
+/* hash table size(2^14) is bigger than max user processes(13233) */
+#define POLICY_HASH_BIT (14)
+
+static DECLARE_HASHTABLE(policy_hash, POLICY_HASH_BIT);
+static DEFINE_MUTEX(policy_lock);
+
+/* per-window qos budget is qos_limit * qos_interval (see check_policy_qos);
+ * the default is applied when a controller opens the policy device */
+static int policy_qos_limit = 0;
+static int policy_qos_limit_default = 0;
+static int policy_qos_interval = 10;
+static int policy_qos_controller_main = 0;
+
+module_param_named(qos_limit, policy_qos_limit_default, int, 0644);
+module_param_named(qos_interval, policy_qos_interval, int, 0644);
+
+/**
+ * get_backend_logfilter_enable() - get backend_filter_enabled value
+ *
+ * Return: 1 when the backend log filter is enabled, 0 otherwise.
+ * (Old-style "()" parameter list replaced with "(void)".)
+ */
+int get_backend_logfilter_enable(void) {
+	return !!backend_filter_enabled;
+}
+
+/**
+ * qos_is_available() - return true if qos is available
+ *
+ * Qos is active only when a non-zero default limit was configured via the
+ * qos_limit module parameter.
+ */
+static inline bool qos_is_available(void)
+{
+	return policy_qos_limit_default != 0;
+}
+
+/**
+ * set_policy_status() - set policy status
+ * @status: any non-zero value enables the policy, zero disables it
+ */
+static inline void set_policy_status(int status)
+{
+	atomic_set(&policy_status, status ? 1 : 0);
+	pr_info("policy_status is set to %d\n", atomic_read(&policy_status));
+}
+
+/**
+ * get_policy_status() - get policy status
+ *
+ * Return: 1 when the policy machinery is enabled, 0 when disabled.
+ */
+static inline int get_policy_status(void)
+{
+	return atomic_read(&policy_status);
+}
+
+/**
+ * set_policy_logger_enable() - set logger status
+ * @enable: any non-zero value enables logging, zero disables it
+ */
+static inline void set_policy_logger_enable(int enable)
+{
+	atomic_set(&policy_logger_enable, enable ? 1 : 0);
+	pr_info("policy_logger_enable is set to %d\n", atomic_read(&policy_logger_enable));
+}
+
+/**
+ * get_policy_logger_enable() - get logger status
+ *
+ * Return: 1 when logging is enabled, 0 when all writes are rejected.
+ */
+static inline int get_policy_logger_enable(void)
+{
+	return atomic_read(&policy_logger_enable);
+}
+
+/**
+ * get_policy_tag() - return a tag, if it does not exist, make a new one
+ * @qos: the qos structure
+ * @name: the name of tag
+ *
+ * Return: the existing or newly created tag, or NULL on allocation failure.
+ * Caller holds qos->lock (called from check_policy_qos).
+ */
+static struct policy_tag *get_policy_tag(struct policy_qos *qos,
+					 const char *name)
+{
+	struct policy_tag *entry;
+
+	/* reuse an existing tag with the same name */
+	list_for_each_entry(entry, &qos->tag_list, node) {
+		if (strcmp(entry->name, name) == 0)
+			return entry;
+	}
+
+	/* not found: allocate and register a fresh one */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	strncpy(entry->name, name, sizeof(entry->name) - 1);
+	list_add_tail(&entry->node, &qos->tag_list);
+	qos->num_tags++;
+
+	return entry;
+}
+
+/**
+ * set_policy_qos_bypass() - toggle a qos_bypass process by the given name
+ * @name: the name of qos_bypass process
+ *
+ * If the name is already registered it is removed; otherwise it is added.
+ * Matching is by prefix of the stored name (strncmp with its length).
+ */
+static int set_policy_qos_bypass(const char *name)
+{
+	struct policy_qos_bypass *qos_bypass, *tmp_qos_bypass;
+
+	mutex_lock(&policy_qos_bypass_lock);
+	list_for_each_entry_safe(qos_bypass, tmp_qos_bypass, &policy_qos_bypass_list, node) {
+		if (!strncmp(qos_bypass->name, name, strlen(qos_bypass->name))) {
+			list_del(&qos_bypass->node);
+			kfree(qos_bypass);
+			policy_qos_bypass_count--;
+
+			mutex_unlock(&policy_qos_bypass_lock);
+			pr_info("remove %s to qos_bypass\n", name);
+
+			return 0;
+		}
+	}
+
+	qos_bypass = kzalloc(sizeof(*qos_bypass), GFP_KERNEL);
+	if (!qos_bypass) {
+		mutex_unlock(&policy_qos_bypass_lock);
+		return -ENOMEM;
+	}
+
+	strncpy(qos_bypass->name, name, sizeof(qos_bypass->name));
+	/* strncpy may not terminate; force a NUL at the end of the buffer */
+	qos_bypass->name[LOGGER_POLICY_MAX_TASK_NAME - 1] = '\0';
+	list_add_tail(&qos_bypass->node, &policy_qos_bypass_list);
+
+	/* -1 means "never used": switch to normal counting on first add */
+	if (policy_qos_bypass_count == -1)
+		policy_qos_bypass_count = 0;
+	policy_qos_bypass_count++;
+
+	mutex_unlock(&policy_qos_bypass_lock);
+	pr_info("add %s to qos_bypass\n", name);
+
+	return 0;
+}
+
+/**
+ * update_policy_qos_bypass() - check if the process could bypass qos filter or not, and set qos_bypass property.
+ * @qos: the qos property structure of the process
+ *
+ * Return: true when the current process matches a registered bypass name.
+ */
+static bool update_policy_qos_bypass(struct policy_qos *qos)
+{
+	struct policy_qos_bypass *qos_bypass;
+	char task_comm[LOGGER_POLICY_MAX_TASK_NAME];
+
+	mutex_lock(&policy_qos_bypass_lock);
+
+	/* -1 means no bypass entry was ever configured: fast exit */
+	if (policy_qos_bypass_count == -1) {
+		mutex_unlock(&policy_qos_bypass_lock);
+		return false;
+	}
+
+	/* match against the thread-group leader's comm, by prefix */
+	get_task_comm(task_comm, current->group_leader);
+	list_for_each_entry(qos_bypass, &policy_qos_bypass_list, node) {
+		if (!strncmp(qos_bypass->name, task_comm, strlen(qos_bypass->name))) {
+			if (!qos->qos_bypass) {
+				qos->qos_bypass = 1;
+				pr_info("Set qos_bypass process : %s", task_comm);
+			}
+			mutex_unlock(&policy_qos_bypass_lock);
+			return true;
+		}
+	}
+
+	/* no longer on the bypass list: clear the cached flag */
+	if (qos->qos_bypass) {
+		qos->qos_bypass = 0;
+		pr_info("Unset qos_bypass process : %s", task_comm);
+	}
+
+	mutex_unlock(&policy_qos_bypass_lock);
+	return false;
+}
+
+/**
+ * set_policy_filter() - toggle a filter by the given name
+ * @name: the name of filter
+ *
+ * If the name already exists it is removed; otherwise it is added.
+ * Return: 0 on success, -ENOMEM when adding fails.
+ */
+static int set_policy_filter(const char *name)
+{
+	struct policy_filter *filter, *tmp_filter;
+	int ret = -ENOMEM;
+
+	mutex_lock(&policy_filter_lock);
+	list_for_each_entry_safe(filter, tmp_filter, &policy_filter_list, node) {
+		if (!strcmp(filter->name, name)) {
+			list_del(&filter->node);
+			kfree(filter);
+			atomic_dec(&policy_filter_count);
+
+			mutex_unlock(&policy_filter_lock);
+			pr_info("remove %s to filter\n", name);
+
+			return 0;
+		}
+	}
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (filter) {
+		strncpy(filter->name, name, sizeof(filter->name) - 1);
+		list_add_tail(&filter->node, &policy_filter_list);
+		atomic_inc(&policy_filter_count);
+
+		ret = 0;
+	}
+
+	mutex_unlock(&policy_filter_lock);
+	/* only announce the addition on success; the previous code also
+	 * logged "add ..." while returning -ENOMEM */
+	if (!ret)
+		pr_info("add %s to filter\n", name);
+
+	return ret;
+}
+
+/**
+ * check_policy_filter() - check a filter whether it does exist or not
+ * @name: the name of filter
+ *
+ * Return: 0 when the tag is allowed, -EPERM otherwise.
+ */
+static int check_policy_filter(const char *name)
+{
+	struct policy_filter *filter;
+
+	/* lockless fast path: an empty list can never match */
+	if (!atomic_read(&policy_filter_count))
+		return -EPERM;
+
+	mutex_lock(&policy_filter_lock);
+	list_for_each_entry(filter, &policy_filter_list, node) {
+		/* "*" is a wildcard entry that allows every tag */
+		if (!strcmp(filter->name, name) || !strcmp(filter->name, "*")) {
+			mutex_unlock(&policy_filter_lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&policy_filter_lock);
+	return -EPERM;
+}
+
+/**
+ * check_policy_qos() - check a qos with the given arguments
+ * @qos: the qos structure
+ * @name: the name of tag
+ *
+ * Enforces a per-process budget of qos_limit * qos_interval logs per
+ * qos_interval-second window; once exceeded, the remaining budget is
+ * shared evenly between the process's tags.
+ * Return: 0 to accept the log, -EPERM to drop it.
+ */
+static int check_policy_qos(struct policy_qos *qos, const char *name)
+{
+	struct policy_tag *tag;
+	int upper_limit = policy_qos_limit * policy_qos_interval;
+	int ret = 0;
+	/* coarse seconds-since-boot; wrap/truncation accepted by design */
+	int ts = (int)jiffies / HZ;
+
+	if (upper_limit == 0)
+		return 0;
+
+	mutex_lock(&qos->lock);
+
+	/* window expired: report drops and reset all counters */
+	if (ts >= qos->reset_timer) {
+		int total_dropped = 0;
+		bool tag_found = false;
+
+		qos->activated = false;
+		qos->count = 1;
+		qos->reset_timer = ts + policy_qos_interval;
+
+		update_policy_qos_bypass(qos);
+
+		list_for_each_entry(tag, &qos->tag_list, node) {
+			/* the current log counts as the first hit of its tag */
+			if (!tag_found && !strcmp(tag->name, name)) {
+				tag->count = 1;
+				tag_found = true;
+			} else {
+				tag->count = 0;
+			}
+
+			if (tag->limited) {
+				pr_info("%s (p:%d)'s \"%s\" tagged %d logs were dropped\n",
+					current->group_leader->comm, current->tgid,
+					tag->name, tag->limited);
+				total_dropped += tag->limited;
+				tag->limited = 0;
+			}
+		}
+
+		if (total_dropped)
+			pr_err("%s (p:%d)'s %d logs were dropped from %d tags\n",
+				current->group_leader->comm, current->tgid,
+				total_dropped, qos->num_tags);
+		goto out;
+	}
+
+	if (qos->qos_bypass)
+		goto out;
+
+	/* check process */
+	if (qos->count < upper_limit) {
+		qos->count++;
+
+		tag = get_policy_tag(qos, name);
+		if (tag)
+			tag->count++;
+	} else {
+		if (!qos->activated) {
+			pr_err("%s (p:%d) exceeds the log limit:%d\n",
+				current->group_leader->comm, current->tgid, upper_limit);
+			qos->activated = true;
+			qos->reset_timer = ts + policy_qos_interval;
+		}
+
+		/* check tag */
+		tag = get_policy_tag(qos, name);
+		if (tag) {
+			/* share the budget over at most MAX_TAG_COUNT tags;
+			 * num_tags >= 1 here since get_policy_tag added one */
+			int num_tags = min_t(int, qos->num_tags, LOGGER_POLICY_MAX_TAG_COUNT);
+
+			if (tag->count < (upper_limit / num_tags)) {
+				tag->count++;
+			} else {
+				tag->limited++;
+				ret = -EPERM;
+			}
+		}
+	}
+
+out:
+	mutex_unlock(&qos->lock);
+	return ret;
+}
+
+/**
+ * logger_policy_check() - check a policy for the associated logger
+ * @policy: the policy structure
+ * @priority: the priority of log
+ * @tag: the tag of log
+ *
+ * Return: 0 to accept, -EPERM when filtered out, -EDQUOT on a qos drop.
+ * Priorities <= min_priority must pass the tag filter; priorities strictly
+ * between min and max are subject to qos; priorities >= max always pass.
+ */
+int logger_policy_check(struct logger_policy *policy, uint8_t priority, const char *tag)
+{
+	/* if policy is disabled, return immediately to print the log. */
+	if (!get_policy_status())
+		return 0;
+
+	/* if logger is disabled, don't do anything. */
+	if (!get_policy_logger_enable())
+		return -EPERM;
+
+	/* check policies. */
+	if (priority <= policy_min_priority) {
+		if (check_policy_filter(tag))
+			return -EPERM;
+	} else if (priority > policy_min_priority && priority < policy_max_priority) {
+		if (check_policy_qos(&policy->qos, tag))
+			return -EDQUOT;
+	}
+
+	return 0;
+}
+
+/* hash key is only based on PID so as to avoid duplicate policy structure
+ * creation in case of launchpad loaded apps */
+static unsigned int get_hash(int pid)
+{
+	/* FNV-1a style mix over the pid's decimal digits (as ASCII) */
+	unsigned int h = 2166136261U + pid;
+	int remaining;
+
+	for (remaining = pid; remaining; remaining /= 10)
+		h = (h ^ ((remaining % 10) + 0x30)) * 0x01000193;
+
+	return h % (1 << POLICY_HASH_BIT);
+}
+
+/**
+ * logger_policy_get() - make a new policy structure
+ *
+ * Looks up (or creates) the per-process policy keyed by current->tgid and
+ * takes a reference on it.
+ *
+ * NOTE(review): two threads of a new process opening concurrently can both
+ * miss the lookup and insert duplicate entries, and the ref++ here pairs
+ * with an unlocked decrement in logger_policy_put() — confirm whether
+ * open() serialisation elsewhere makes this safe.
+ */
+struct logger_policy *logger_policy_get(void)
+{
+	struct logger_policy *policy;
+	unsigned int hash_key = get_hash(current->tgid);
+
+	mutex_lock(&policy_lock);
+	hash_for_each_possible(policy_hash, policy, node, hash_key) {
+		if (policy->pid == current->tgid) {
+			policy->ref ++;
+			mutex_unlock(&policy_lock);
+			return policy;
+		}
+	}
+	mutex_unlock(&policy_lock);
+	if (verbose_mode_enabled)
+		pr_err("logger_open called by %d:%s", (int)current->tgid, current->group_leader->comm);
+	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+	if (!policy)
+		return ERR_PTR(-ENOMEM);
+
+	update_policy_qos_bypass(&policy->qos);
+
+	INIT_LIST_HEAD(&policy->qos.tag_list);
+	mutex_init(&policy->qos.lock);
+
+	policy->pid = current->tgid;
+	policy->ref = 1;
+
+	mutex_lock(&policy_lock);
+	hash_add(policy_hash, &policy->node, hash_key);
+	mutex_unlock(&policy_lock);
+
+	return policy;
+}
+
+/**
+ * logger_policy_put() - release a policy structure
+ * @policy: the policy structure to release
+ *
+ * Drops one reference; the last reference unhashes and frees the policy.
+ * The refcount drop and hash removal now run under policy_lock so they
+ * cannot race with logger_policy_get()'s locked lookup/ref++ (the previous
+ * code decremented and unhashed without the lock).
+ */
+void logger_policy_put(struct logger_policy *policy)
+{
+	struct policy_qos *qos = &policy->qos;
+	struct policy_tag *tag, *tmp_tag;
+
+	mutex_lock(&policy_lock);
+	if (-- policy->ref > 0) {
+		mutex_unlock(&policy_lock);
+		return;
+	}
+	/* last reference: make the policy unreachable before freeing */
+	hash_del(&policy->node);
+	mutex_unlock(&policy_lock);
+
+	mutex_lock(&qos->lock);
+	list_for_each_entry_safe(tag, tmp_tag, &qos->tag_list, node) {
+		list_del(&tag->node);
+		kfree(tag);
+	}
+	mutex_unlock(&qos->lock);
+
+	kfree(policy);
+}
+
+/* file ops. */
+
+/* logger_policy_ioctl - control interface of the policy device: toggle
+ * filters, enable/disable the policy or the logger, and (for the main
+ * qos controller only) change the runtime qos limit */
+static long logger_policy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	char name[LOGGER_POLICY_MAX_TAG_NAME + 1] = {0, };
+	int var;
+	int err = 0;
+
+	void __user *argp = (void __user *)arg;
+
+	switch (cmd) {
+	case LOGGER_POLICY_SET_FILTER:
+		/* one byte short of the buffer keeps the NUL terminator */
+		if (copy_from_user(name, argp, sizeof(name) - 1))
+			return -EFAULT;
+
+		err = set_policy_filter(name);
+		break;
+	case LOGGER_POLICY_SET_STATUS:
+		if (copy_from_user(&var, argp, sizeof(var)))
+			return -EFAULT;
+
+		set_policy_status(var);
+		break;
+	case LOGGER_POLICY_SET_LOGGER_ENABLE:
+		if (copy_from_user(&var, argp, sizeof(var)))
+			return -EFAULT;
+
+		set_policy_logger_enable(var);
+		break;
+	case LOGGER_POLICY_SET_QOS:
+		if (!qos_is_available())
+			return -EPERM;
+
+		/* only the most recent writer of the policy device (the
+		 * "main" controller) may change the limit */
+		if (policy_qos_controller_main != current->tgid)
+			return -EPERM;
+
+		if (copy_from_user(&var, argp, sizeof(var)))
+			return -EFAULT;
+
+		if (var < 0)
+			return -EINVAL;
+
+		policy_qos_limit = var;
+		pr_info("policy qos limit : %d\n", policy_qos_limit);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * logger_policy_open() - open handler for the policy device.
+ *
+ * A writable open registers the caller as a QoS controller: it is added
+ * to the controller list and becomes the "main" controller (the only
+ * process allowed to issue LOGGER_POLICY_SET_QOS).  The QoS limit is
+ * reset to its default on each such open.  Read-only opens do nothing.
+ */
+static int logger_policy_open(struct inode *inode, struct file *file)
+{
+ if (!qos_is_available())
+ return -EPERM;
+
+ if (file->f_mode & FMODE_WRITE) {
+ struct policy_qos_controller *qos_controller;
+
+ qos_controller = kzalloc(sizeof(*qos_controller), GFP_KERNEL);
+ if (!qos_controller)
+ return -ENOMEM;
+
+ policy_qos_limit = policy_qos_limit_default;
+ qos_controller->pid = (int)current->tgid;
+
+ mutex_lock(&policy_qos_controller_lock);
+ list_add(&qos_controller->node, &policy_qos_controller_list);
+ policy_qos_controller_count++;
+ /* most recent writable opener always becomes the main controller */
+ policy_qos_controller_main = qos_controller->pid;
+ mutex_unlock(&policy_qos_controller_lock);
+ pr_err("set qos controller PID:%d", policy_qos_controller_main);
+ }
+ return 0;
+}
+
+/*
+ * logger_policy_release() - release handler for the policy device.
+ *
+ * On a writable close, removes every controller entry registered by the
+ * closing process.  If that process was the main controller, promotes
+ * the head of the remaining list (the most recently added entry, since
+ * list_add() prepends) or, when the list is empty, clears the main
+ * controller and restores the default QoS limit.
+ */
+static int logger_policy_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_WRITE) {
+ struct policy_qos_controller *controller, *tmp_controller;
+
+ mutex_lock(&policy_qos_controller_lock);
+ list_for_each_entry_safe(controller, tmp_controller, &policy_qos_controller_list, node) {
+ if (controller->pid == (int)current->tgid) {
+ list_del(&controller->node);
+ kfree(controller);
+ policy_qos_controller_count--;
+ pr_info("release qos controller : %d(remain:%d), main:%d",
+ current->tgid, policy_qos_controller_count, policy_qos_controller_main);
+ }
+ }
+
+ if (policy_qos_controller_main == (int)current->tgid) {
+ struct policy_qos_controller *latest_controller;
+ int main_controller_next;
+
+ if (policy_qos_controller_count == 0) {
+ main_controller_next = 0;
+ policy_qos_limit = policy_qos_limit_default;
+ } else {
+ latest_controller = list_first_entry(&policy_qos_controller_list, struct policy_qos_controller, node);
+ main_controller_next = latest_controller->pid;
+ }
+
+ pr_err("reset qos controller : %d to %d",
+ policy_qos_controller_main, main_controller_next);
+ policy_qos_controller_main = main_controller_next;
+ }
+
+ mutex_unlock(&policy_qos_controller_lock);
+ }
+ return 0;
+}
+
+/* file_operations for the policy misc device (ioctl/open/release only). */
+static const struct file_operations logger_policy_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = logger_policy_ioctl,
+#if defined CONFIG_COMPAT
+ .compat_ioctl = logger_policy_ioctl, // modified ioctl to get same number
+#endif
+ .open = logger_policy_open,
+ .release = logger_policy_release,
+};
+
+/* sysfs callbacks */
+/*
+ * filter_show() - list all registered filter tag names, one per line.
+ */
+static ssize_t filter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct policy_filter *filter;
+ int len = 0;
+
+ mutex_lock(&policy_filter_lock);
+ list_for_each_entry(filter, &policy_filter_list, node)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s\n", filter->name);
+ mutex_unlock(&policy_filter_lock);
+
+ return len;
+}
+
+/*
+ * filter_store() - parse a single whitespace-delimited tag name from the
+ * sysfs write and register it as a filter.
+ * NOTE(review): the "%32s" width presumably matches
+ * LOGGER_POLICY_MAX_TAG_NAME (32) — confirm against the header.
+ */
+static ssize_t filter_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ char name[LOGGER_POLICY_MAX_TAG_NAME + 1] = {0, };
+ int ret;
+
+ if (count > LOGGER_POLICY_MAX_TAG_NAME + 1)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%32s", name);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = set_policy_filter(name);
+ return (ret == 0 ? count : ret);
+}
+static DEVICE_ATTR_RW(filter);
+
+/* backend_logfilter (RO): whether backend log filtering is enabled. */
+static ssize_t backend_logfilter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", get_backend_logfilter_enable());
+}
+
+static DEVICE_ATTR_RO(backend_logfilter);
+
+/* status (RW): global policy status flag. */
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", get_policy_status());
+}
+
+static ssize_t status_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+
+ if (kstrtoint(buf, 10, &value) < 0) {
+ pr_err("Failed to get value(%m)");
+ /* NOTE(review): returning 0 on a parse error makes the shell
+  * retry the write forever; -EINVAL is the usual convention. */
+ return 0;
+ }
+ set_policy_status(value);
+
+ return count;
+}
+static DEVICE_ATTR_RW(status);
+
+/* logger_enable (RW): toggle the logger on/off. */
+static ssize_t logger_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", get_policy_logger_enable());
+}
+
+static ssize_t logger_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+
+ if (kstrtoint(buf, 10, &value) < 0) {
+ pr_err("Failed to get value(%m)");
+ return 0;
+ }
+
+ set_policy_logger_enable(value);
+
+ return count;
+}
+static DEVICE_ATTR_RW(logger_enable);
+
+/* qos_interval (RW): QoS accounting interval; only positive values stick. */
+static ssize_t qos_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", policy_qos_interval);
+}
+
+static ssize_t qos_interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+
+ if (kstrtoint(buf, 10, &value) < 0) {
+ pr_err("Failed to get value(%m)");
+ return 0;
+ }
+
+ if (value > 0)
+ policy_qos_interval = value;
+
+ return count;
+}
+static DEVICE_ATTR_RW(qos_interval);
+
+/* qos (RW): show/set the QoS log limit (sysfs path has no controller
+ * ownership check, unlike LOGGER_POLICY_SET_QOS). */
+static ssize_t qos_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "limit:%d interval:%d\n",
+ policy_qos_limit, policy_qos_interval);
+}
+
+static ssize_t qos_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+
+ if (!qos_is_available()) {
+ pr_err("QoS is not available");
+ return 0;
+ }
+
+ if (kstrtoint(buf, 10, &value) < 0) {
+ pr_err("Failed to get value(%m)");
+ return 0;
+ }
+
+ if (value < 0) {
+ pr_err("The value is invalid: %d", value);
+ return 0;
+ }
+
+ policy_qos_limit = value;
+ pr_info("policy qos limit : %u\n", policy_qos_limit);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(qos);
+
+/* qos_bypass (RW): list / register task names exempt from QoS limiting. */
+static ssize_t qos_bypass_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct policy_qos_bypass *qos_bypass;
+ int len = 0;
+
+ mutex_lock(&policy_qos_bypass_lock);
+ list_for_each_entry(qos_bypass, &policy_qos_bypass_list, node)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s\n", qos_bypass->name);
+
+ mutex_unlock(&policy_qos_bypass_lock);
+
+ return len;
+}
+
+/*
+ * NOTE(review): "%16s" can write up to 17 bytes (16 chars + NUL); that
+ * only fits if LOGGER_POLICY_MAX_TASK_NAME > 16 — confirm the constant,
+ * otherwise this is a one-byte stack overflow.
+ */
+static ssize_t qos_bypass_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ char name[LOGGER_POLICY_MAX_TASK_NAME] = {0, };
+ int ret;
+
+ if (count > LOGGER_POLICY_MAX_TASK_NAME)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%16s", name);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = set_policy_qos_bypass(name);
+ return (ret == 0 ? count : ret);
+}
+static DEVICE_ATTR_RW(qos_bypass);
+
+/* sysfs attributes exposed under the policy misc device's kobject. */
+static struct attribute *logger_policy_attributes[] = {
+ &dev_attr_filter.attr,
+ &dev_attr_status.attr,
+ &dev_attr_logger_enable.attr,
+ &dev_attr_qos_interval.attr,
+ &dev_attr_qos.attr,
+ &dev_attr_backend_logfilter.attr,
+ &dev_attr_qos_bypass.attr,
+ NULL,
+};
+
+static const struct attribute_group logger_policy_attr_group = {
+ .attrs = logger_policy_attributes,
+};
+
+/* world read/write misc device; SMACK label applied when supported. */
+static const char miscdev_name[] = LOGGER_POLICY_MISCDEV_NAME;
+static struct miscdevice logger_policy_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = miscdev_name,
+ .fops = &logger_policy_fops,
+ .mode = (S_IRUGO|S_IWUGO),
+#ifdef CONFIG_SECURITY_SMACK_SET_DEV_SMK_LABEL
+ .lab_smk64 = LOGGER_POLICY_SMACK_LABEL,
+#endif
+};
+
+/*
+ * logger_policy_init() - register the policy misc device, create its
+ * sysfs group, and initialise the PID->policy hash and default QoS limit.
+ *
+ * Return: 0 on success, negative errno otherwise (device is deregistered
+ * again if sysfs group creation fails).
+ */
+int logger_policy_init(void)
+{
+ struct device *dev;
+ int err = 0;
+
+ err = misc_register(&logger_policy_miscdev);
+ if (err)
+ goto err_exit;
+
+ dev = logger_policy_miscdev.this_device;
+ err = sysfs_create_group(&dev->kobj, &logger_policy_attr_group);
+ if (err) {
+ dev_err(dev, "failed to create sysfs nodes with (%d) error\n", err);
+ goto err_deregister;
+ }
+
+ hash_init(policy_hash);
+ policy_qos_limit = policy_qos_limit_default;
+
+ return 0;
+
+err_deregister:
+ misc_deregister(&logger_policy_miscdev);
+err_exit:
+ return err;
+}
+
+/*
+ * logger_policy_exit() - tear down sysfs group and misc device.
+ */
+void logger_policy_exit(void)
+{
+ struct device *dev = logger_policy_miscdev.this_device;
+
+ sysfs_remove_group(&dev->kobj, &logger_policy_attr_group);
+ misc_deregister(&logger_policy_miscdev);
+}
+
--- /dev/null
+/*
+ * logger_stats: Logger Stats for Tizen TV.
+ *
+ * Copyright (C) 2019 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/uio.h>
+#include <linux/sched.h>
+#include "logger.h"
+#include "logger_policy.h"
+#include "logger_stats.h"
+
+/**
+ * struct logger_stats - statistics data for logger
+ * @bytes: number of bytes
+ * @count: count of logs
+ */
+struct logger_stats {
+ u64 bytes;
+ u64 count;
+};
+
+static DEFINE_MUTEX(g_stats_lock);              /* guards all state below */
+static int g_stats_enable = 0;                  /* stats collection on/off */
+static pid_t g_owner = 0;                       /* tgid of the writable opener (dlogutil) */
+/* per-buffer, per-event counters: [logger id][read/write/overflow/drop] */
+static struct logger_stats g_stats[NR_LOGGER_IDS][LOGGER_STATS_MAX] = {0,};
+static char g_dlogutil_stats[LOGGER_STATS_SIZE] = {0, };  /* text blob pushed by dlogutil */
+
+module_param_named(stats_enable, g_stats_enable, int, 0644);
+
+/*
+ * logger_stats_update() - account @bytes for one event on one log buffer.
+ * @stats_id: event class (write/read/overflow/drop)
+ * @log_id:   which log buffer
+ * @bytes:    payload size to add
+ * @pid:      acting process; READ/OVERFLOW events are only counted when
+ *            they come from the registered owner (dlogutil)
+ *
+ * No-op while stats collection is disabled.
+ */
+void logger_stats_update(int stats_id, int log_id, int bytes, int pid)
+{
+ if (!g_stats_enable)
+ return;
+
+ if ((stats_id == LOGGER_STATS_READ || stats_id == LOGGER_STATS_OVERFLOW) && pid != g_owner)
+ return;
+
+ mutex_lock(&g_stats_lock);
+ g_stats[log_id][stats_id].bytes += bytes;
+ g_stats[log_id][stats_id].count ++;
+ mutex_unlock(&g_stats_lock);
+}
+
+/*
+ * _print_stats() - format one row of counters into a sysfs buffer as
+ * "write:B(C) read:B(C) overflow:B(C) drop:B(C)\n".
+ * The caller needs to hold g_stats_lock.
+ *
+ * Return: number of bytes written, or -1 on formatting error/overflow.
+ *
+ * NOTE(review): snprintf returns the would-be length on truncation, so
+ * "len > PAGE_SIZE" can pass while the text was already cut short at
+ * PAGE_SIZE - len; an ">= PAGE_SIZE" check (or scnprintf) looks safer —
+ * confirm intent.
+ */
+static int _print_stats(struct logger_stats st[LOGGER_STATS_MAX], char *buf)
+{
+ const char *HD[LOGGER_STATS_MAX] = {"write", "read", "overflow", "drop"};
+ int i;
+ int len = 0;
+
+ for (i = 0; i < LOGGER_STATS_MAX; i++) {
+ int r = snprintf(buf, PAGE_SIZE - len, "%s:%llu(%llu) ",
+ HD[i], st[i].bytes, st[i].count);
+ if (r < 0) {
+ pr_err("Failed to write stats to buffer(%m)");
+ return -1;
+ }
+ buf += r;
+ len += r;
+ if (len > PAGE_SIZE) {
+ pr_err("Buffer overflow");
+ return -1;
+ }
+ }
+ /* replace the final trailing space with a newline */
+ *(buf-1) = '\n';
+
+ return len;
+}
+
+/*
+ * LOGGER_STAT_ATTR() - generate a read-only sysfs attribute named @dev
+ * that prints the stats row of log buffer @idx under g_stats_lock.
+ */
+#define LOGGER_STAT_ATTR(dev, idx) \
+static ssize_t dev##_show(struct device *d, struct device_attribute *a, char *b) \
+{ \
+	int ret; \
+	mutex_lock(&g_stats_lock); \
+	ret = _print_stats(g_stats[idx], b); \
+	mutex_unlock(&g_stats_lock); \
+	return ret; \
+} \
+static DEVICE_ATTR_RO(dev)
+
+/* one attribute per Android-style log buffer */
+LOGGER_STAT_ATTR(main, LOGGER_ID_MAIN);
+LOGGER_STAT_ATTR(events, LOGGER_ID_EVENTS);
+LOGGER_STAT_ATTR(radio, LOGGER_ID_RADIO);
+LOGGER_STAT_ATTR(system, LOGGER_ID_SYSTEM);
+
+/* total (RO): stats row summed across all log buffers. */
+static ssize_t total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct logger_stats total[LOGGER_STATS_MAX] = {0, };
+ int i, j;
+ int ret;
+
+ mutex_lock(&g_stats_lock);
+ for (i = 0; i < LOGGER_STATS_MAX; i++) {
+ for (j = 0; j < NR_LOGGER_IDS; j++) {
+ total[i].bytes += g_stats[j][i].bytes;
+ total[i].count += g_stats[j][i].count;
+ }
+ }
+ ret = _print_stats(total, buf);
+ mutex_unlock(&g_stats_lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(total);
+
+/* dlogutil (RO): the stats text last pushed by the owning dlogutil
+ * process via LOGGER_STATS_SEND; empty when no owner is registered. */
+static ssize_t dlogutil_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+
+ if (g_owner == 0)
+ return 0;
+
+ mutex_lock(&g_stats_lock);
+ ret = snprintf(buf, PAGE_SIZE, "pid:%d %s\n", g_owner, g_dlogutil_stats);
+ mutex_unlock(&g_stats_lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(dlogutil);
+
+/* enable (RW): toggle stats collection; disabling also clears all counters. */
+static ssize_t enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", g_stats_enable);
+}
+
+static ssize_t enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ long value;
+
+ if (kstrtol(buf, 10, &value) < 0) {
+ pr_err("Failed to get value(%m)");
+ return 0;
+ }
+
+ g_stats_enable = !!value;
+
+ /* turning stats off resets all accumulated data */
+ if (g_stats_enable == 0) {
+ mutex_lock(&g_stats_lock);
+ memset(g_stats, 0, sizeof(g_stats));
+ memset(g_dlogutil_stats, 0, sizeof(g_dlogutil_stats));
+ mutex_unlock(&g_stats_lock);
+ }
+ return count;
+}
+static DEVICE_ATTR_RW(enable);
+
+/* sysfs attributes exposed under the stats misc device's kobject. */
+static struct attribute *logger_stats_attributes[] = {
+ &dev_attr_main.attr,
+ &dev_attr_events.attr,
+ &dev_attr_radio.attr,
+ &dev_attr_system.attr,
+ &dev_attr_total.attr,
+ &dev_attr_dlogutil.attr,
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group logger_stats_attr_group = {
+ .attrs = logger_stats_attributes,
+};
+
+/*
+ * logger_stats_ioctl() - control interface for the stats device.
+ *
+ * LOGGER_STATS_SEND: copy a stats text blob from userspace into
+ * g_dlogutil_stats (shown via the "dlogutil" sysfs attribute).
+ * Rejected with -EPERM while stats collection is disabled.
+ */
+static long logger_stats_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *parg = (void __user *)arg;
+ long ret = 0;
+
+ if (!g_stats_enable)
+ return -EPERM;
+
+ switch (cmd) {
+ case LOGGER_STATS_SEND:
+ mutex_lock(&g_stats_lock);
+ if (copy_from_user(g_dlogutil_stats, parg, sizeof(g_dlogutil_stats)))
+ ret = -EFAULT;
+
+ mutex_unlock(&g_stats_lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * logger_stats_open() - open handler; a writable open claims exclusive
+ * ownership (g_owner) so only one process at a time may push stats.
+ * Returns -EPERM if ownership is already taken.
+ */
+static int logger_stats_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ if (file->f_mode & FMODE_WRITE) {
+ mutex_lock(&g_stats_lock);
+ if (g_owner) {
+ pr_err("Write permission has been taken by %d", g_owner);
+ ret = -EPERM;
+ } else {
+ g_owner = current->tgid;
+ }
+ mutex_unlock(&g_stats_lock);
+ }
+ return ret;
+}
+
+/*
+ * logger_stats_release() - release handler; a writable close gives up
+ * ownership and clears the owner-specific counters (READ/OVERFLOW) plus
+ * the pushed stats blob.  WRITE/DROP counters are kept.
+ */
+static int logger_stats_release(struct inode *ignored, struct file *file)
+{
+ int i;
+
+ if (file->f_mode & FMODE_WRITE) {
+ mutex_lock(&g_stats_lock);
+ g_owner = 0;
+ for (i = 0; i < NR_LOGGER_IDS; i++) {
+ memset(&g_stats[i][LOGGER_STATS_READ], 0, sizeof(struct logger_stats));
+ memset(&g_stats[i][LOGGER_STATS_OVERFLOW], 0, sizeof(struct logger_stats));
+ }
+ memset(g_dlogutil_stats, 0, sizeof(g_dlogutil_stats));
+ mutex_unlock(&g_stats_lock);
+ }
+ return 0;
+}
+
+/* file_operations for the stats misc device (ioctl/open/release only). */
+static const struct file_operations logger_stats_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = logger_stats_ioctl,
+#if defined CONFIG_COMPAT
+ .compat_ioctl = logger_stats_ioctl,
+#endif
+ .open = logger_stats_open,
+ .release = logger_stats_release,
+};
+
+/* world read/write misc device; SMACK label applied when supported. */
+static const char miscdev_name[] = LOGGER_STATS_MISCDEV_NAME;
+static struct miscdevice logger_stats_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = miscdev_name,
+ .fops = &logger_stats_fops,
+ .mode = (S_IRUGO|S_IWUGO),
+#ifdef CONFIG_SECURITY_SMACK_SET_DEV_SMK_LABEL
+ .lab_smk64 = LOGGER_STATS_SMACK_LABEL,
+#endif
+};
+
+/*
+ * logger_stats_init() - register the stats misc device, create its sysfs
+ * group and enable collection.  Note this overrides any stats_enable=0
+ * given as a module parameter.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int logger_stats_init(void)
+{
+ struct device *dev;
+ int err = 0;
+
+ err = misc_register(&logger_stats_miscdev);
+ if (err)
+ goto err_exit;
+
+ dev = logger_stats_miscdev.this_device;
+ err = sysfs_create_group(&dev->kobj, &logger_stats_attr_group);
+ if (err) {
+ dev_err(dev, "failed to create sysfs nodes with (%d) error\n", err);
+ goto err_deregister;
+ }
+
+ g_stats_enable = 1;
+
+ return 0;
+
+err_deregister:
+ misc_deregister(&logger_stats_miscdev);
+err_exit:
+ return err;
+}
+
+/*
+ * logger_stats_exit() - tear down sysfs group and misc device.
+ */
+void logger_stats_exit(void)
+{
+ struct device *dev = logger_stats_miscdev.this_device;
+
+ sysfs_remove_group(&dev->kobj, &logger_stats_attr_group);
+ misc_deregister(&logger_stats_miscdev);
+}
--- /dev/null
+/*
+ * drivers/misc/vlogger.c
+ *
+ * A Logging Subsystem for Tizen TV
+ *
+ * Copyright (C) 2021 Samsung Electronics Co., Ltd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "vlogger: " fmt
+
+#include <linux/module.h>
+#include <linux/threads.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/moduleparam.h>
+#include <linux/trace_clock.h>
+#include <linux/timekeeping.h>
+#include <linux/completion.h>
+#ifdef CONFIG_TIZEN_VLOGGER
+#include <linux/vlogger.h>
+#endif
+
+#define MB (1 << 20)
+#define KB (1 << 10)
+
+/* 8 mmap-able devices x 4 MB = 32 MB of log space, carved into 2 KB
+ * blocks; one block is handed out per writing thread at a time. */
+#define DEVICE_COUNT (8)
+#define MAP_SIZE (4 * MB)
+#define BUFFER_SIZE (DEVICE_COUNT * MAP_SIZE)
+#define V_BLOCK_SIZE (2 * KB)
+#define BLOCK_COUNT (BUFFER_SIZE / V_BLOCK_SIZE)
+#define DATA_MAX (V_BLOCK_SIZE - sizeof(struct head_t))
+#define MAX_THREAD (0x8000)
+#define MS_PER_SEC (1000)
+#define NS_PER_SEC (1000000000UL)
+
+#define IOCTL_COMMAND_ALLOC (20745321)
+#define VLOGGER_DEVICE_NAME "vlog"
+#define VLOGGER_SMACK_LABEL "*"
+
+#define MAX_TAG_SIZE (32)
+#define MAX_MSG_SIZE (140)
+
+/* percentage of all blocks represented by @count */
+#define BLOCK_RATIO(count) (count*100/BLOCK_COUNT)
+
+/* per-thread state: the 1-based block index currently owned (0 = none) */
+struct thread_t {
+	uint16_t block;
+};
+
+/* one log record inside a block's data area */
+struct entry_t {
+	uint64_t time;   /* ktime_get_ns() timestamp */
+	uint16_t CRC;    /* checksum folded from the timestamp words */
+	uint16_t len;    /* payload length after this header */
+
+	char data[0];    /* prio byte + NUL-terminated tag + NUL-terminated msg */
+};
+
+/* per-block header */
+struct head_t {
+	uint64_t ts;     /* timestamp of the last write into this block */
+	uint16_t pid;
+	uint16_t tid;
+	uint16_t offset; /* next free byte inside data[] */
+};
+
+struct block_t {
+	struct head_t head;
+	char data[DATA_MAX];
+};
+
+/* fixed-capacity ring of free block indices */
+struct queue_t {
+	char name[5];
+	uint16_t front;
+	uint16_t rear;
+	uint16_t count;
+	uint16_t capacity;
+	uint16_t *values;
+};
+
+static struct miscdevice vlogger_device;
+
+static int g_init;                       /* set once vlogger_init() succeeds */
+static char *g_shm_ptr[DEVICE_COUNT];    /* the 4 MB mmap-able buffers */
+static struct thread_t *g_threads;       /* tid -> owned block table */
+
+static struct mutex g_block_mutex;       /* guards g_free_q and g_threads[].block */
+static struct mutex g_task_mutex;        /* guards g_task_on */
+
+static struct queue_t g_free_q;          /* free-block ring */
+
+static int g_max_thread_id;              /* highest tid seen; bounds the GC scan */
+
+static uint64_t g_start_time;            /* GC epoch: blocks older than this are reclaimable */
+static uint64_t g_hwc_offset;            /* trace_clock_local() - ktime_get_ns() at init */
+
+static int g_task_on;                    /* GC kthread running flag */
+static uint32_t g_free_count;            /* blocks reclaimed by the GC */
+
+static uint32_t g_err_count;             /* allocation failures (no free block) */
+
+static struct completion g_completion;   /* used to nudge the GC kthread early */
+
+static int g_vlog_enable = 1;
+module_param_named(vlog_enable, g_vlog_enable, int, 0644);
+
+/*
+ * get_shared_memory() - return the base of one 4 MB buffer, or NULL for
+ * an out-of-range index.
+ */
+static inline char *get_shared_memory(int dev_index)
+{
+ if (dev_index < 0 || dev_index >= DEVICE_COUNT) {
+ pr_debug("Invalid index: %d\n", dev_index);
+ return NULL;
+ }
+
+ return g_shm_ptr[dev_index];
+}
+
+/*
+ * get_block() - translate a 1-based block index into its block_t.
+ * Each buffer holds 2048 (1<<11) blocks of 2 KB (1<<11 bytes), hence the
+ * shift/mask by 11.
+ *
+ * NOTE(review): when the buffer pointer is NULL the warning fires but the
+ * pointer arithmetic still happens, so the return value is only NULL for
+ * offset 0 — callers that test "!block" may get a bogus non-NULL pointer
+ * for an invalid index; confirm this cannot be reached in practice.
+ */
+static inline struct block_t *get_block(uint16_t block_index)
+{
+ uint16_t index = block_index - 1;
+ int offset = index & 0x7FF;
+ char *p = get_shared_memory(index >> 11);
+
+ if (!p) {
+ pr_err("[INVALID BLOCK] index:%d free:%d err:%d", block_index, g_free_q.count, g_err_count);
+ dump_stack();
+ WARN_ON(1);
+ }
+
+ return (struct block_t *)(p + (offset << 11));
+}
+
+/*
+ * queue_init() - initialise a fixed-capacity ring of uint16 values.
+ * NOTE(review): the kzalloc() result is not checked here; queue_pop/push
+ * would dereference a NULL values array if it failed — confirm the
+ * caller handles this (vlogger_init does not appear to).
+ */
+static inline void queue_init(struct queue_t *q, const char *name, uint16_t capacity)
+{
+ snprintf(q->name, sizeof(q->name), "%s", name);
+ q->front = 0;
+ q->rear = capacity - 1;
+ q->count = 0;
+ q->capacity = capacity;
+ q->values = kzalloc(capacity * sizeof(uint16_t), GFP_KERNEL);
+}
+
+static inline void queue_deinit(struct queue_t *q)
+{
+ if (q->values)
+ kfree(q->values);
+}
+
+/*
+ * queue_pop() - remove and return the oldest value; 0 means "empty"
+ * (0 is never a valid block index, see queue_push).
+ */
+static inline uint16_t queue_pop(struct queue_t *q)
+{
+ uint16_t r;
+
+ if (q->count == 0)
+ return 0;
+
+ r = q->values[q->front++];
+ if (q->front == q->capacity)
+ q->front = 0;
+ q->count--;
+
+ return r;
+}
+
+/*
+ * queue_push() - append a value; full queues and the reserved value 0
+ * are rejected with a log message (silently dropping the value).
+ */
+static inline void queue_push(struct queue_t *q, uint16_t value)
+{
+ if (q->count >= q->capacity) {
+ pr_info("[%s] Queue is full", q->name);
+ return;
+ }
+
+ if (value == 0) {
+ pr_info("[%s] NULL is invalid", q->name);
+ return;
+ }
+
+ q->rear++;
+ if (q->rear == q->capacity)
+ q->rear = 0;
+ q->values[q->rear] = value;
+ q->count++;
+}
+
+/*
+ * vlog_task() - garbage-collector kthread.  Every 5 s (or when kicked via
+ * g_completion) it scans all known threads and returns to the free queue
+ * any block whose last-write timestamp predates the g_start_time epoch.
+ *
+ * NOTE(review): the loop never exits, so the g_task_on reset, do_exit()
+ * and return below are unreachable; there is also no kthread_should_stop()
+ * check, so the thread cannot be stopped at module exit — confirm.
+ */
+static int vlog_task(void *user_data)
+{
+ int i;
+ uint16_t blk;
+
+ do {
+ for (i = 1; i <= g_max_thread_id; i++) {
+ blk = g_threads[i].block;
+ if (blk && get_block(blk)->head.ts < g_start_time) {
+ mutex_lock(&g_block_mutex);
+ queue_push(&g_free_q, blk);
+ g_threads[i].block = 0;
+ mutex_unlock(&g_block_mutex);
+ g_free_count++;
+ }
+ }
+ wait_for_completion_interruptible_timeout(&g_completion, msecs_to_jiffies(MS_PER_SEC * 5));
+ } while (1);
+
+ g_task_on = 0;
+ do_exit(0);
+
+ return 0;
+}
+
+/*
+ * run_task() - start the GC kthread if not already running; if it is,
+ * just kick it awake via the completion.  g_task_mutex guards the
+ * check-and-set of g_task_on.
+ */
+static void run_task(void)
+{
+ struct task_struct *task;
+
+ if (!mutex_trylock(&g_task_mutex))
+ return;
+
+ if (g_task_on) {
+ mutex_unlock(&g_task_mutex);
+ complete(&g_completion);
+ return;
+ }
+ g_task_on = 1;
+ mutex_unlock(&g_task_mutex);
+
+ task = kthread_run(vlog_task, NULL, "vlog_task");
+ if (IS_ERR(task)) {
+ pr_err("Failed to run vlog_task\n");
+ g_task_on = 0;
+ }
+}
+
+/*
+ * alloc_block_for_thread() - hand the calling thread a fresh 2 KB block.
+ *
+ * Any block the thread already owned is returned to the free queue first
+ * (its contents are considered flushed/consumed).  The new block's header
+ * is stamped with the caller's pid/tid and the current GC epoch.
+ *
+ * Return: the 1-based block index on success, -EINVAL for an oversized
+ * tid, -ENOMEM when no free block is available.
+ */
+static long alloc_block_for_thread(void)
+{
+ int pid = current->tgid;
+ int tid = current->pid;
+ uint16_t blk;
+ struct block_t *block;
+
+ if (tid >= MAX_THREAD) {
+ pr_err("Invalid tid: %d", tid);
+ return -EINVAL;
+ }
+
+ /* widen the GC scan range to cover this tid */
+ if (g_max_thread_id < tid)
+ g_max_thread_id = tid;
+
+ mutex_lock(&g_block_mutex);
+ blk = g_threads[tid].block;
+ if (blk)
+ queue_push(&g_free_q, blk);
+ blk = queue_pop(&g_free_q);
+ g_threads[tid].block = blk;
+ mutex_unlock(&g_block_mutex);
+
+ if (!blk) {
+ /* rate-limit the out-of-memory log to 3 lines per 10000 failures */
+ if ((g_err_count++ % 10000) < 3)
+ pr_info("[NO MEMORY] tid:%d free:%d err:%d", tid, g_free_q.count, g_err_count);
+ return -ENOMEM;
+ }
+
+ block = get_block(blk);
+
+ /* advance the GC epoch past this (recycled) block's last timestamp */
+ if (g_start_time < block->head.ts)
+ g_start_time = block->head.ts;
+
+ block->head.pid = pid;
+ block->head.tid = tid;
+ block->head.offset = 0;
+ block->head.ts = g_start_time;
+
+ return (long)blk;
+}
+
+/* open/release need no per-file state */
+static int vlogger_open(struct inode *inodep, struct file *filep)
+{
+ return 0;
+}
+
+static int vlogger_release(struct inode *inodep, struct file *filep)
+{
+ return 0;
+}
+
+/*
+ * vlogger_mmap() - map one 4 MB log buffer (selected by pgoff) into
+ * userspace.  When pgoff selects an index past the buffers (for which
+ * get_shared_memory() returns NULL), the g_threads table is mapped
+ * instead — apparently intentional, so userspace can read per-thread
+ * block ownership.
+ *
+ * NOTE(review): "dev_index > DEVICE_COUNT" (not >=) permits exactly the
+ * index DEVICE_COUNT, which hits the g_threads fallback — but g_threads
+ * is only MAX_THREAD * sizeof(thread_t) = 64 KB while up to MAP_SIZE may
+ * be requested; confirm the size userspace maps for that index.
+ */
+static int vlogger_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ const int PAGES_PER_MAP = MAP_SIZE / PAGE_SIZE;
+ int dev_index = (int)vma->vm_pgoff / PAGES_PER_MAP;
+ unsigned long offset = vma->vm_pgoff % PAGES_PER_MAP;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ char *p;
+ struct page *page;
+
+ if (dev_index > DEVICE_COUNT || offset != 0 || size > MAP_SIZE) {
+ pr_err("mmap failed: dev(%d) offset(%lu), size(%lu), pgoff(%lu)\n", dev_index, offset, size, vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ p = get_shared_memory(dev_index);
+ if (p)
+ page = virt_to_page((unsigned long)p);
+ else
+ page = virt_to_page((unsigned long)g_threads);
+
+ return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size, vma->vm_page_prot);
+}
+
+/* read()/write() are not supported — all data moves through mmap. */
+static ssize_t vlogger_read(struct file *filep, char *buffer, size_t len, loff_t *offset)
+{
+ pr_err("read failed!\n");
+ return -EPERM;
+}
+
+static ssize_t vlogger_write(struct file *filep, const char *buffer, size_t len, loff_t *offset)
+{
+ pr_err("write failed!\n");
+ return -EPERM;
+}
+
+/*
+ * vlogger_ioctl() - IOCTL_COMMAND_ALLOC assigns the calling thread a
+ * block and returns its index; everything else is -EINVAL.
+ */
+static long vlogger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == IOCTL_COMMAND_ALLOC)
+ return alloc_block_for_thread();
+
+ return -EINVAL;
+}
+
+static const struct file_operations vlogger_fops = {
+ .open = vlogger_open,
+ .read = vlogger_read,
+ .write = vlogger_write,
+ .release = vlogger_release,
+ .mmap = vlogger_mmap,
+ .unlocked_ioctl = vlogger_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vlogger_ioctl,
+#endif
+ .owner = THIS_MODULE,
+};
+
+/* status (RO): free-block ratio, GC state and error counters. */
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int thread_count = 0;
+ int i;
+
+ /* count threads currently holding a block (unlocked snapshot) */
+ for (i = 0; i < MAX_THREAD; i++) {
+ if (g_threads[i].block > 0)
+ thread_count ++;
+ }
+
+ return snprintf(buf, PAGE_SIZE,
+ "free(%d%%):%d/%d task_on:%d gc_free:%u error:%u tid_max:%d\n",
+ BLOCK_RATIO(g_free_q.count), g_free_q.count, (g_free_q.count + thread_count),
+ g_task_on, g_free_count, g_err_count, g_max_thread_id);
+}
+
+static DEVICE_ATTR_RO(status);
+
+/* time (RO): GC epoch, current monotonic time, trace-clock offset. */
+static ssize_t time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "%llu %llu %llu\n", g_start_time, ktime_get_ns(), g_hwc_offset);
+}
+
+static DEVICE_ATTR_RO(time);
+
+/* block (RW): debug attribute — select a block index, then read its header. */
+static uint16_t g_block = 1;
+static ssize_t block_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long value;
+
+ if (kstrtoul(buf, 10, &value) < 0) {
+ pr_err("Failed to get value");
+ return -EINVAL;
+ }
+
+ if (value < 1 || value > BLOCK_COUNT)
+ return -EINVAL;
+
+ g_block = value;
+
+ return count;
+}
+
+static ssize_t block_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct block_t *block = get_block((uint16_t)g_block);
+
+ if (!block)
+ return snprintf(buf, PAGE_SIZE, "[%d] Invalid block\n", g_block);
+
+ return snprintf(buf, PAGE_SIZE, "[%d] pid:%u tid:%u offset:%u %llu %llu\n",
+ g_block, block->head.pid, block->head.tid, block->head.offset,
+ ((struct entry_t *)(block->data))->time, block->head.ts);
+}
+
+static DEVICE_ATTR_RW(block);
+
+/* thread (RW): debug attribute — select a tid, then read its owned block. */
+static uint16_t g_thread = 1;
+static ssize_t thread_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ long value;
+
+ if (kstrtol(buf, 10, &value) < 0) {
+ pr_err("Failed to get value");
+ return -EINVAL;
+ }
+
+ if (value < 1 || value > g_max_thread_id)
+ return -EINVAL;
+
+ g_thread = value;
+
+ return count;
+}
+
+static ssize_t thread_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "[%d] block:%u\n", g_thread, g_threads[g_thread].block);
+}
+
+static DEVICE_ATTR_RW(thread);
+
+/* sysfs attributes exposed under the vlog misc device's kobject. */
+static struct attribute *vlogger_attributes[] = {
+ &dev_attr_status.attr,
+ &dev_attr_time.attr,
+ &dev_attr_block.attr,
+ &dev_attr_thread.attr,
+ NULL,
+};
+
+static const struct attribute_group vlogger_attr_group = {
+ .attrs = vlogger_attributes,
+};
+
+#ifdef CONFIG_TIZEN_VLOGGER
+/*
+ * get_valid_block() - return the calling thread's current block if @len
+ * more bytes fit, otherwise allocate a fresh one.  NULL on failure.
+ */
+static inline struct block_t *get_valid_block(int tid, size_t len)
+{
+ uint16_t blk = 0;
+ long r;
+
+ if (g_threads == NULL)
+ return NULL;
+
+ blk = g_threads[tid].block;
+
+ if (blk != 0) {
+ struct block_t *block = get_block(blk);
+
+ if (!block)
+ return NULL;
+
+ if (block->head.offset + len < DATA_MAX)
+ return block;
+ }
+
+ r = alloc_block_for_thread();
+ if (r <= 0)
+ return NULL;
+
+ return get_block((uint16_t)r);
+}
+
+/*
+ * _vlog_write() - append one log entry (prio byte, NUL-terminated tag,
+ * NUL-terminated msg) to the calling thread's block.  Entry header
+ * carries the timestamp, a checksum folded from the timestamp's four
+ * 16-bit words, and the payload length.
+ *
+ * Return: total entry size on success, -ENOMEM with no block available,
+ * -EFAULT if the entry would run past the block end (should not happen
+ * given the get_valid_block() check — defensive).
+ */
+static int _vlog_write(const unsigned char prio, const char *tag, const char *msg)
+{
+ uint64_t ts = ktime_get_ns();
+ uint16_t *slt = (uint16_t *)&ts;
+ const int tid = current->pid;
+ size_t hd_size = sizeof(struct entry_t);
+ size_t prio_size = 1;
+ size_t tag_size = strnlen(tag, MAX_TAG_SIZE) + 1;
+ size_t msg_size = strnlen(msg, MAX_MSG_SIZE) + 1;
+ size_t entry_size = hd_size + prio_size + tag_size + msg_size;
+ struct block_t *block = get_valid_block(tid, entry_size);
+ struct entry_t *entry;
+ struct entry_t tmp;
+
+ if (block == NULL)
+ return -ENOMEM;
+
+ entry = (struct entry_t *)(block->data + block->head.offset);
+
+ if ((char *)entry + entry_size > (char *)block + V_BLOCK_SIZE) {
+ pr_err("[%d] block:%p(tid:%d offset:%x) entry:%p(%zu)\n",
+ tid, block, block->head.tid, block->head.offset, entry, entry_size);
+ return -EFAULT;
+ }
+
+ tmp.time = ts;
+ tmp.CRC = slt[0] + slt[1] + slt[2] + slt[3];
+ tmp.len = (uint16_t)entry_size - hd_size;
+
+ /* header is built in tmp then copied, avoiding torn reads by consumers */
+ memcpy(entry, &tmp, hd_size);
+
+ entry->data[0] = (char)prio;
+
+ memcpy(&entry->data[prio_size], tag, tag_size-1);
+ entry->data[prio_size+tag_size-1] = 0;
+
+ memcpy(&entry->data[prio_size+tag_size], msg, msg_size-1);
+ entry->data[prio_size+tag_size+msg_size-1] = 0;
+
+ block->head.offset += (uint16_t)entry_size;
+ block->head.ts = ts;
+
+ return (int)entry_size;
+}
+
+/* registered with the core vlogger hook (vlog_set_ops) */
+static struct vlogger_ops g_vlog_ops = {
+	.write = _vlog_write
+};
+#endif
+
+/*
+ * vlogger_init() - register the vlog misc device and sysfs group, allocate
+ * the thread table and the eight 4 MB buffers, fill the free-block queue,
+ * hook into the core vlogger, and start the GC kthread.
+ *
+ * Return: 0 on success (or when disabled via vlog_enable=0), negative
+ * errno otherwise.
+ *
+ * NOTE(review): the later error paths return without deregistering the
+ * misc device / removing the sysfs group / freeing earlier allocations —
+ * a failure here leaks those resources; confirm whether init failure is
+ * treated as fatal for the whole module.
+ */
+int vlogger_init(void)
+{
+ int i = 0;
+ int r = 0;
+
+ if (!g_vlog_enable) {
+ pr_info("vlog is disable\n");
+ return 0;
+ }
+
+ vlogger_device.minor = MISC_DYNAMIC_MINOR;
+ vlogger_device.name = VLOGGER_DEVICE_NAME;
+ vlogger_device.fops = &vlogger_fops;
+ vlogger_device.mode = 0666;
+#ifdef CONFIG_SECURITY_SMACK_SET_DEV_SMK_LABEL
+ vlogger_device.lab_smk64 = VLOGGER_SMACK_LABEL;
+#endif
+ r = misc_register(&vlogger_device);
+ if (unlikely(r)) {
+ pr_err("Failed to register misc device for '%s' (%d)\n", VLOGGER_DEVICE_NAME, r);
+ return r;
+ }
+
+ r = sysfs_create_group(&vlogger_device.this_device->kobj, &vlogger_attr_group);
+ if (unlikely(r)) {
+ dev_err(vlogger_device.this_device, "failed to create sysfs nodes with (%d)\n", r);
+ return r;
+ }
+
+ g_threads = kzalloc(sizeof(struct thread_t) * MAX_THREAD, GFP_KERNEL);
+ if (g_threads == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT; i++) {
+ g_shm_ptr[i] = kzalloc(MAP_SIZE, GFP_KERNEL);
+ if (g_shm_ptr[i] == NULL)
+ return -ENOMEM;
+ }
+
+ mutex_init(&g_block_mutex);
+ mutex_init(&g_task_mutex);
+
+ init_completion(&g_completion);
+
+ /* all block indices are 1-based; 0 is the "no block" sentinel */
+ queue_init(&g_free_q, "free", BLOCK_COUNT);
+ for (i = 1; i <= BLOCK_COUNT; i++)
+ queue_push(&g_free_q, i);
+
+#ifdef CONFIG_TRACE_CLOCK
+ g_hwc_offset = trace_clock_local() - ktime_get_ns();
+#endif
+#ifdef CONFIG_TIZEN_VLOGGER
+ vlog_set_ops(&g_vlog_ops);
+#endif
+ run_task();
+
+ g_init = 1;
+ pr_info("Init success\n");
+
+ return 0;
+}
+
+/*
+ * vlogger_exit() - free queue, thread table and buffers, then remove the
+ * sysfs group and misc device.
+ *
+ * NOTE(review): the GC kthread (vlog_task) is never stopped before
+ * g_threads and the buffers are freed — if it is still running this is a
+ * use-after-free; confirm exit ordering (e.g. kthread_stop first).
+ */
+int vlogger_exit(void)
+{
+ int i;
+
+ queue_deinit(&g_free_q);
+ kfree(g_threads);
+ for (i = 0; i < DEVICE_COUNT; i++)
+ kfree(g_shm_ptr[i]);
+
+ sysfs_remove_group(&vlogger_device.this_device->kobj, &vlogger_attr_group);
+ misc_deregister(&vlogger_device);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("jh1009.sung, <jh1009.sung@samsung.com>");
+MODULE_DESCRIPTION("Tizen Vlogger");
%install
mkdir -p %{buildroot}/usr/src/%{name}/kdbus
+mkdir -p %{buildroot}/usr/src/%{name}/vlogger
+mkdir -p %{buildroot}/usr/src/%{name}/vlogger/include_internal
mkdir -p %{buildroot}/%{_includedir}/linux
mkdir -p %{buildroot}/%{_libexecdir}/%{name}/tests
cp -a include/ %{buildroot}/usr/src/%{name}
cp kernel/*.[ch] kernel/Makefile COPYING %{buildroot}/usr/src/%{name}
cp kernel/kdbus/*.[ch] kernel/kdbus/Makefile %{buildroot}/usr/src/%{name}/kdbus
+cp kernel/vlogger/*.c kernel/vlogger/Makefile %{buildroot}/usr/src/%{name}/vlogger
+cp kernel/vlogger/include_internal/*.h %{buildroot}/usr/src/%{name}/vlogger/include_internal
cp include/uapi/linux/kdbus.h %{buildroot}/%{_includedir}/linux
cp include/uapi/linux/logger.h %{buildroot}/%{_includedir}/linux
/usr/src/%{name}/kdbus/Makefile
/usr/src/%{name}/include/uapi/linux/*.h
+/usr/src/%{name}/vlogger/*.c
+/usr/src/%{name}/vlogger/include_internal/*.h
+/usr/src/%{name}/vlogger/Makefile
+
%files -n linux-tizen-modules-headers
%manifest %{name}.manifest
%license COPYING