#endif
};
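+/*
+ * State of one printk log buffer. The main kernel buffer (log_buf)
+ * heads a list of additional kmsg_sys buffers, each one exposed to
+ * user space through its own device minor.
+ */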
+struct log_buffer {
+#ifdef CONFIG_PRINTK
+ struct list_head list; /* kmsg (log_buf) is the head of the buffer list */
+ char *buf; /* cyclic log buffer */
+ u32 len; /* buffer length */
+ wait_queue_head_t wait; /* wait queue for kmsg buffer */
+#endif
/*
- * The logbuf_lock protects kmsg buffer, indices, counters. It is also
- * used in interesting ways to provide interlocking in console_unlock();
+ * The lock protects the kmsg buffer, indices, and counters. It can be
+ * taken within the scheduler's rq lock, so it must be released before
+ * calling console_unlock() or anything else that might wake up a process.
*/
-static DEFINE_RAW_SPINLOCK(logbuf_lock);
+ raw_spinlock_t lock;
+ u64 first_seq; /* sequence number of the first record stored */
+ u32 first_idx; /* index of the first record stored */
+ u64 next_seq; /* sequence number of the next record to store */
+#ifdef CONFIG_PRINTK
+ u32 next_idx; /* index of the next record to store */
+ int minor; /* minor representing buffer device */
+#endif
+};
#ifdef CONFIG_PRINTK
-DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
static enum log_flags syslog_prev;
static size_t syslog_partial;
-/* index and sequence number of the first record stored in the buffer */
-static u64 log_first_seq;
-static u32 log_first_idx;
-
-/* index and sequence number of the next record to store in the buffer */
-static u64 log_next_seq;
-static u32 log_next_idx;
-
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
#else
#define LOG_ALIGN __alignof__(struct log)
#endif
-#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
-static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
-static char *log_buf = __log_buf;
-static u32 log_buf_len = __LOG_BUF_LEN;
+#define __LOG_BUF_K_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+static char __log_buf_k[__LOG_BUF_K_LEN] __aligned(LOG_ALIGN);
+
+static struct log_buffer log_buf = {
+ .list = LIST_HEAD_INIT(log_buf.list),
+ .buf = __log_buf_k,
+ .len = __LOG_BUF_K_LEN,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(log_buf.lock),
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(log_buf.wait),
+ .first_seq = 0,
+ .first_idx = 0,
+ .next_seq = 0,
+ .next_idx = 0,
+ .minor = 0,
+};
+
+wait_queue_head_t *log_wait = &log_buf.wait;
+
+/* Return log buffer address */
+char *log_buf_addr_get(void)
+{
+ return log_buf.buf;
+}
+
+/* Return log buffer size */
+u32 log_buf_len_get(void)
+{
+ return log_buf.len;
+}
-/* cpu currently holding logbuf_lock */
+/* cpu currently holding log_buf.lock */
static volatile unsigned int logbuf_cpu = UINT_MAX;
/* human readable text of the record */
}
/* get record by index; idx must point to valid msg */
-static struct log *log_from_idx(u32 idx)
+static struct log *log_from_idx(struct log_buffer *log_b, u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct log *msg = (struct log *)(log_b->buf + idx);
/*
* A length == 0 record is the end of buffer marker. Wrap around and
* read the message at the start of the buffer.
*/
if (!msg->len)
- return (struct log *)log_buf;
+ return (struct log *)log_b->buf;
return msg;
}
/* get next record; idx must point to valid msg */
-static u32 log_next(u32 idx)
+static u32 log_next(struct log_buffer *log_b, u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct log *msg = (struct log *)(log_b->buf + idx);
/* length == 0 indicates the end of the buffer; wrap */
/*
 * A length == 0 record is the end of buffer marker. Wrap around and
 * read the message at the start of the buffer as *this* one, and
 * return the one after that.
 */
if (!msg->len) {
- msg = (struct log *)log_buf;
+ msg = (struct log *)log_b->buf;
return msg->len;
}
return idx + msg->len;
}
+/*
+ * Check whether there is enough free space for the given message.
+ *
+ * The same values of first_idx and next_idx mean that the buffer
+ * is either empty or full.
+ *
+ * If the buffer is empty, we must respect the position of the indexes.
+ * They cannot be reset to the beginning of the buffer.
+ */
+static int logbuf_has_space(struct log_buffer *log_b, u32 msg_size, bool empty)
+{
+ u32 free;
+
+ if (log_b->next_idx > log_b->first_idx || empty)
+ free = max(log_b->len - log_b->next_idx, log_b->first_idx);
+ else
+ free = log_b->first_idx - log_b->next_idx;
+
+ /*
+ * We also need space for an empty header that signals wrapping
+ * of the buffer.
+ */
+ return free >= msg_size + sizeof(struct log);
+}
+
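+/*
+ * Drop records from the head of the buffer until the given message
+ * fits, or return -ENOMEM when even an empty buffer is too small.
+ */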
+static int log_make_free_space(struct log_buffer *log_b, u32 msg_size)
+{
+ while (log_b->first_seq < log_b->next_seq) {
+ if (logbuf_has_space(log_b, msg_size, false))
+ return 0;
+ /* drop old messages until we have enough contiguous space */
+ log_b->first_idx = log_next(log_b, log_b->first_idx);
+ log_b->first_seq++;
+ }
+
+ /* sequence numbers are equal, so the log buffer is empty */
+ if (logbuf_has_space(log_b, msg_size, true))
+ return 0;
+
+ return -ENOMEM;
+}
+
+/* compute the message size including the padding bytes */
+static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
+{
+ u32 size;
+
+ size = sizeof(struct log) + text_len + dict_len;
+ *pad_len = (-size) & (LOG_ALIGN - 1);
+ size += *pad_len;
+
+ return size;
+}
+
+/*
+ * Define how much of the log buffer we could take at maximum. The value
+ * must be greater than two. Note that only half of the buffer is available
+ * when the index points to the middle.
+ */
+#define MAX_LOG_TAKE_PART 4
+static const char trunc_msg[] = "<truncated>";
+
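+/* Shrink an over-long message so that a truncated version can be stored. */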
+static u32 truncate_msg(struct log_buffer *log_b, u16 *text_len,
+ u16 *dict_len, u32 *pad_len)
+{
+ /*
+ * The message should not take the whole buffer. Otherwise, it might
+ * get removed too soon.
+ */
+ u32 max_text_len = log_b->len / MAX_LOG_TAKE_PART;
+
+ if (*text_len > max_text_len)
+ *text_len = max_text_len;
+ /* disable the "dict" completely */
+ *dict_len = 0;
+ /* compute the size again, count also the warning message */
+ return msg_used_size(*text_len + strlen(trunc_msg), 0, pad_len);
+}
+
#ifdef CONFIG_SEC_LOG
-static char initial_log_buf[__LOG_BUF_LEN];
+static char initial_log_buf[__LOG_BUF_K_LEN];
static unsigned int initial_log_idx = 0;
static void (*log_text_hook)(char *text, size_t size);
static char *seclog_buf;
unsigned *position, size_t bufsize)
{
unsigned long flags;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
if (buf && bufsize) {
seclog_buf = buf;
seclog_ptr = position;
seclog_size = bufsize;
log_text_hook = f;
}
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
}
EXPORT_SYMBOL(register_log_text_hook);
static size_t msg_print_text(const struct log *msg, enum log_flags prev,
#endif
/* insert record into the buffer, discard old ones, update heads */
-static void log_store(int facility, int level,
+static int log_store(struct log_buffer *log_b,
+ int facility, int level,
enum log_flags flags, u64 ts_nsec,
const char *dict, u16 dict_len,
const char *text, u16 text_len, int cpu)
u32 size, pad_len;
/* number of '\0' padding bytes to next message */
- size = sizeof(struct log) + text_len + dict_len;
- pad_len = (-size) & (LOG_ALIGN - 1);
- size += pad_len;
-
- while (log_first_seq < log_next_seq) {
- u32 free;
-
- if (log_next_idx > log_first_idx)
- free = max(log_buf_len - log_next_idx, log_first_idx);
- else
- free = log_first_idx - log_next_idx;
-
- if (free > size + sizeof(struct log))
- break;
+ size = msg_used_size(text_len, dict_len, &pad_len);
- /* drop old messages until we have enough contiuous space */
- log_first_idx = log_next(log_first_idx);
- log_first_seq++;
+ if (log_make_free_space(log_b, size)) {
+ /* truncate the message if it is too long for an empty buffer */
+ size = truncate_msg(log_b, &text_len, &dict_len, &pad_len);
+ /* survive when the log buffer is too small for trunc_msg */
+ if (log_make_free_space(log_b, size))
+ return 0;
}
- if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
+ if (log_b->next_idx + size + sizeof(struct log) > log_b->len) {
/*
* This message + an additional empty header does not fit
* at the end of the buffer. Add an empty header with len == 0
* to signify a wrap around.
*/
- memset(log_buf + log_next_idx, 0, sizeof(struct log));
- log_next_idx = 0;
+ memset(log_b->buf + log_b->next_idx, 0,
+ sizeof(struct log));
+ log_b->next_idx = 0;
}
/* fill message */
- msg = (struct log *)(log_buf + log_next_idx);
+ msg = (struct log *)(log_b->buf + log_b->next_idx);
memcpy(log_text(msg), text, text_len);
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
sec_text, 1024);
log_text_hook(sec_text, size);
- } else if (initial_log_idx < (__LOG_BUF_LEN)) {
+ } else if (initial_log_idx < (__LOG_BUF_K_LEN)) {
/* Storing of kernel boot logs prior to log_text_hook()
* registration
*/
}
#endif
/* insert message */
- log_next_idx += msg->len;
- log_next_seq++;
+ log_b->next_idx += msg->len;
+ log_b->next_seq++;
+
+ return msg->text_len;
}
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
u8 facility; /* log facility of first message */
enum log_flags flags; /* prefix, newline flags */
bool flushed:1; /* buffer sealed and committed */
+ int cpu; /* cpu that emitted the buffered fragments */
} cont;
static void cont_flush(enum log_flags flags)
* console; wait for the console to pick up the rest of the
* line. LOG_NOCONS suppresses a duplicated output.
*/
- log_store(cont.facility, cont.level, flags | LOG_NOCONS,
- cont.ts_nsec, NULL, 0, cont.buf, cont.len);
+ log_store(&log_buf, cont.facility, cont.level,
+ flags | LOG_NOCONS, cont.ts_nsec, NULL, 0,
+ cont.buf, cont.len, cont.cpu);
cont.flags = flags;
cont.flushed = true;
} else {
* If no fragment of this line ever reached the console,
* just submit it to the store and free the buffer.
*/
- log_store(cont.facility, cont.level, flags, 0,
- NULL, 0, cont.buf, cont.len);
+ log_store(&log_buf, cont.facility, cont.level, flags, 0,
+ NULL, 0, cont.buf, cont.len, cont.cpu);
cont.len = 0;
}
}
return textlen;
}
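+/*
+ * Format the message and store it. Records for kmsg_sys buffers are
+ * stored directly; records for the main buffer may first be merged
+ * with a pending continuation line.
+ */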
-static int log_format_and_store(int facility, int level,
+static int log_format_and_store(struct log_buffer *log_b,
+ int facility, int level,
const char *dict, size_t dictlen,
- const char *fmt, va_list args)
+ const char *fmt, int cpu, va_list args)
{
static char textbuf[LOG_LINE_MAX];
char *text = textbuf;
switch (kern_level) {
case '0' ... '7':
- if (level == LOGLEVEL_DEFAULT)
+ if (level == DEFAULT_MESSAGE_LOGLEVEL)
level = kern_level - '0';
/* fallthrough */
case 'd': /* KERN_DEFAULT */
}
}
- if (level == LOGLEVEL_DEFAULT)
+ if (level == DEFAULT_MESSAGE_LOGLEVEL)
level = default_message_loglevel;
if (dict)
lflags |= LOG_PREFIX|LOG_NEWLINE;
+ if (log_b != &log_buf)
+ return log_store(log_b, facility, level, lflags, 0,
+ dict, dictlen, text, text_len, cpu);
+
if (!(lflags & LOG_NEWLINE)) {
/*
* Flush the conflicting buffer. An earlier newline was missing,
if (cont_add(facility, level, text, text_len))
printed_len += text_len;
else
- printed_len += log_store(facility, level,
+ printed_len += log_store(log_b, facility, level,
lflags | LOG_CONT, 0,
- dict, dictlen, text, text_len);
+ dict, dictlen, text,
+ text_len, cpu);
} else {
bool stored = false;
if (stored)
printed_len += text_len;
else
- printed_len += log_store(facility, level,
+ printed_len += log_store(log_b, facility, level,
lflags, 0, dict, dictlen,
- text, text_len);
+ text, text_len, cpu);
}
return printed_len;
}
char buf[CONSOLE_EXT_LOG_MAX];
};
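+/*
+ * Format and store a message in the kmsg_sys buffer registered for
+ * @minor, then wake up its readers. Returns -ENXIO when no buffer
+ * with that minor exists.
+ */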
+static int kmsg_sys_write(int minor, int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret = -ENXIO;
+ struct log_buffer *log_b;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(log_b, &log_buf.list, list) {
+ if (log_b->minor != minor)
+ continue;
+
+ raw_spin_lock(&log_b->lock);
+
+ va_start(args, fmt);
+ log_format_and_store(log_b, 1 /* LOG_USER */, level,
+ NULL, 0, fmt, smp_processor_id(), args);
+ va_end(args);
+ wake_up_interruptible(&log_b->wait);
+
+ raw_spin_unlock(&log_b->lock);
+
+ ret = 0;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
unsigned long count, loff_t pos)
{
int facility = 1; /* LOG_USER */
size_t len = iov_length(iv, count);
ssize_t ret = len;
+ int minor = iminor(iocb->ki_filp->f_inode);
if (len > LOG_LINE_MAX)
return -EINVAL;
}
line[len] = '\0';
- printk_emit(facility, level, NULL, 0, "%s", line);
+ if (minor == log_buf.minor) {
+ printk_emit(facility, level, NULL, 0, "%s", line);
+ } else {
+ int error = kmsg_sys_write(minor, level, "%s", line);
+
+ if (error)
+ ret = error;
+ }
+
out:
kfree(buf);
return ret;
}
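+/*
+ * Read the next record from @log_b, blocking until one is available
+ * unless O_NONBLOCK is set.
+ */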
-static ssize_t devkmsg_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t kmsg_read(struct log_buffer *log_b, struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
struct log *msg;
size_t len;
ssize_t ret;
- if (!user)
- return -EBADF;
-
p = user->buf;
e = user->buf + sizeof(user->buf);
ret = mutex_lock_interruptible(&user->lock);
if (ret)
return ret;
- raw_spin_lock_irq(&logbuf_lock);
- while (user->seq == log_next_seq) {
+ raw_spin_lock_irq(&log_b->lock);
+ while (user->seq == log_b->next_seq) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_b->lock);
goto out;
}
- raw_spin_unlock_irq(&logbuf_lock);
- ret = wait_event_interruptible(log_wait,
- user->seq != log_next_seq);
+ raw_spin_unlock_irq(&log_b->lock);
+ ret = wait_event_interruptible(log_b->wait,
+ user->seq != log_b->next_seq);
if (ret)
goto out;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&log_b->lock);
}
- if (user->seq < log_first_seq) {
+ if (user->seq < log_b->first_seq) {
/* our last seen message is gone, return error and reset */
- user->idx = log_first_idx;
- user->seq = log_first_seq;
+ user->idx = log_b->first_idx;
+ user->seq = log_b->first_seq;
ret = -EPIPE;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_b->lock);
goto out;
}
- msg = log_from_idx(user->idx);
+ msg = log_from_idx(log_b, user->idx);
ts_usec = msg->ts_nsec;
do_div(ts_usec, 1000);
append_char(&p, e, '\n');
}
- user->idx = log_next(user->idx);
+ user->idx = log_next(log_b, user->idx);
user->seq++;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_b->lock);
len = p - user->buf;
if (len > count) {
out:
mutex_unlock(&user->lock);
return ret;
}
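+/* Dispatch the read to the log buffer that owns the device minor. */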
-static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
- loff_t ret = 0;
+ ssize_t ret = -ENXIO;
+ int minor = iminor(file->f_inode);
+ struct log_buffer *log_b;
if (!user)
return -EBADF;
- if (offset)
- return -ESPIPE;
- raw_spin_lock_irq(&logbuf_lock);
+ if (minor == log_buf.minor)
+ return kmsg_read(&log_buf, file, buf, count, ppos);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(log_b, &log_buf.list, list) {
+ if (log_b->minor == minor) {
+ ret = kmsg_read(log_b, file, buf, count, ppos);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
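+/*
+ * Reposition the reader within @log_b. SEEK_DATA is only meaningful
+ * for the main buffer, which tracks the last SYSLOG_ACTION_CLEAR.
+ */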
+static loff_t kmsg_llseek(struct log_buffer *log_b, struct file *file,
+ int whence)
+{
+ struct devkmsg_user *user = file->private_data;
+ loff_t ret = 0;
+
+ raw_spin_lock_irq(&log_b->lock);
switch (whence) {
case SEEK_SET:
/* the first record */
- user->idx = log_first_idx;
- user->seq = log_first_seq;
+ user->idx = log_b->first_idx;
+ user->seq = log_b->first_seq;
break;
case SEEK_DATA:
+ /* no clear index for kmsg_sys buffers */
+ if (log_b != &log_buf) {
+ ret = -EINVAL;
+ break;
+ }
/*
* The first record after the last SYSLOG_ACTION_CLEAR,
* like issued by 'dmesg -c'. Reading /dev/kmsg itself
break;
case SEEK_END:
/* after the last record */
- user->idx = log_next_idx;
- user->seq = log_next_seq;
+ user->idx = log_b->next_idx;
+ user->seq = log_b->next_seq;
break;
default:
ret = -EINVAL;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_b->lock);
return ret;
}
-static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
+static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
struct devkmsg_user *user = file->private_data;
- int ret = 0;
+ loff_t ret = -ENXIO;
+ int minor = iminor(file->f_inode);
+ struct log_buffer *log_b;
if (!user)
- return POLLERR|POLLNVAL;
+ return -EBADF;
+ if (offset)
+ return -ESPIPE;
+
+ if (minor == log_buf.minor)
+ return kmsg_llseek(&log_buf, file, whence);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(log_b, &log_buf.list, list) {
+ if (log_b->minor == minor) {
+ ret = kmsg_llseek(log_b, file, whence);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
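+/*
+ * Poll @log_b for new records; POLLERR|POLLPRI is reported when the
+ * reader's position has been overwritten.
+ */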
+static unsigned int kmsg_poll(struct log_buffer *log_b,
+ struct file *file, poll_table *wait)
+{
+ struct devkmsg_user *user = file->private_data;
+ int ret = 0;
- poll_wait(file, &log_wait, wait);
+ poll_wait(file, &log_b->wait, wait);
- raw_spin_lock_irq(&logbuf_lock);
- if (user->seq < log_next_seq) {
+ raw_spin_lock_irq(&log_b->lock);
+ if (user->seq < log_b->next_seq) {
/* return error when data has vanished underneath us */
- if (user->seq < log_first_seq)
+ if (user->seq < log_b->first_seq)
ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
else
ret = POLLIN|POLLRDNORM;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_b->lock);
return ret;
}
-static int devkmsg_open(struct inode *inode, struct file *file)
+static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
{
- struct devkmsg_user *user;
- int err;
+ struct devkmsg_user *user = file->private_data;
+ int ret = POLLERR|POLLNVAL;
+ int minor = iminor(file->f_inode);
+ struct log_buffer *log_b;
- /* write-only does not need any file context */
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- return 0;
+ if (!user)
+ return POLLERR|POLLNVAL;
+
+ if (minor == log_buf.minor)
+ return kmsg_poll(&log_buf, file, wait);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(log_b, &log_buf.list, list) {
+ if (log_b->minor == minor) {
+ ret = kmsg_poll(log_b, file, wait);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
- err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
- SYSLOG_FROM_READER);
- if (err)
- return err;
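+/* Allocate reader state and position it at the first record of @log_b. */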
+static int kmsg_open(struct log_buffer *log_b, struct file *file)
+{
+ struct devkmsg_user *user;
user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
if (!user)
mutex_init(&user->lock);
- raw_spin_lock_irq(&logbuf_lock);
- user->idx = log_first_idx;
- user->seq = log_first_seq;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&log_b->lock);
+ user->idx = log_b->first_idx;
+ user->seq = log_b->first_seq;
+ raw_spin_unlock_irq(&log_b->lock);
file->private_data = user;
return 0;
}
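+/*
+ * Only opens of the main buffer need the syslog permission check;
+ * kmsg_sys buffers are simply looked up by minor under RCU.
+ */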
+static int devkmsg_open(struct inode *inode, struct file *file)
+{
+ int ret = -ENXIO;
+ int minor = iminor(file->f_inode);
+ struct log_buffer *log_b;
+
+ /* write-only does not need any file context */
+ if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+ return 0;
+
+ if (minor == log_buf.minor) {
+ ret = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
+ SYSLOG_FROM_READER);
+ if (ret)
+ return ret;
+
+ return kmsg_open(&log_buf, file);
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(log_b, &log_buf.list, list) {
+ if (log_b->minor == minor) {
+ ret = kmsg_open(log_b, file);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
static int devkmsg_release(struct inode *inode, struct file *file)
{
struct devkmsg_user *user = file->private_data;
void log_buf_kexec_setup(void)
{
VMCOREINFO_SYMBOL(log_buf);
- VMCOREINFO_SYMBOL(log_buf_len);
- VMCOREINFO_SYMBOL(log_first_idx);
- VMCOREINFO_SYMBOL(log_next_idx);
+ VMCOREINFO_STRUCT_SIZE(log_buffer);
+ VMCOREINFO_OFFSET(log_buffer, buf);
+ VMCOREINFO_OFFSET(log_buffer, len);
+ VMCOREINFO_OFFSET(log_buffer, first_idx);
+ VMCOREINFO_OFFSET(log_buffer, next_idx);
/*
* Export struct log size and field offsets. User space tools can
* parse it and detect any changes to structure down the line.
}
#endif
-/* requested log_buf_len from kernel cmdline */
+/* requested log_buf.len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;
/* save requested log_buf_len since it's too early to process it */
if (size)
size = roundup_pow_of_two(size);
- if (size > log_buf_len)
+ if (size > log_buf.len)
new_log_buf_len = size;
return 0;
}
if (unlikely(!new_log_buf)) {
- pr_err("log_buf_len: %ld bytes not available\n",
+ pr_err("log_buf.len: %lu bytes not available\n",
new_log_buf_len);
return;
}
- raw_spin_lock_irqsave(&logbuf_lock, flags);
- log_buf_len = new_log_buf_len;
- log_buf = new_log_buf;
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
+ log_buf.len = new_log_buf_len;
+ log_buf.buf = new_log_buf;
new_log_buf_len = 0;
- free = __LOG_BUF_LEN - log_next_idx;
- memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ free = __LOG_BUF_K_LEN - log_buf.next_idx;
+ memcpy(log_buf.buf, __log_buf_k, __LOG_BUF_K_LEN);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
- pr_info("log_buf_len: %d\n", log_buf_len);
+ pr_info("log_buf.len: %d\n", log_buf.len);
pr_info("early log buf free: %d(%d%%)\n",
- free, (free * 100) / __LOG_BUF_LEN);
+ free, (free * 100) / __LOG_BUF_K_LEN);
}
static bool __read_mostly ignore_loglevel;
size_t n;
size_t skip;
- raw_spin_lock_irq(&logbuf_lock);
- if (syslog_seq < log_first_seq) {
+ raw_spin_lock_irq(&log_buf.lock);
+ if (syslog_seq < log_buf.first_seq) {
/* messages are gone, move to first one */
- syslog_seq = log_first_seq;
- syslog_idx = log_first_idx;
+ syslog_seq = log_buf.first_seq;
+ syslog_idx = log_buf.first_idx;
syslog_prev = 0;
syslog_partial = 0;
}
- if (syslog_seq == log_next_seq) {
- raw_spin_unlock_irq(&logbuf_lock);
+ if (syslog_seq == log_buf.next_seq) {
+ raw_spin_unlock_irq(&log_buf.lock);
break;
}
skip = syslog_partial;
- msg = log_from_idx(syslog_idx);
+ msg = log_from_idx(&log_buf, syslog_idx);
n = msg_print_text(msg, syslog_prev, true, text,
LOG_LINE_MAX + PREFIX_MAX);
if (n - syslog_partial <= size) {
/* message fits into buffer, move forward */
- syslog_idx = log_next(syslog_idx);
+ syslog_idx = log_next(&log_buf, syslog_idx);
syslog_seq++;
syslog_prev = msg->flags;
n -= syslog_partial;
syslog_partial += n;
} else
n = 0;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_buf.lock);
if (!n)
break;
if (!text)
return -ENOMEM;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&log_buf.lock);
if (buf) {
u64 next_seq;
u64 seq;
u32 idx;
enum log_flags prev;
- if (clear_seq < log_first_seq) {
+ if (clear_seq < log_buf.first_seq) {
/* messages are gone, move to first available one */
- clear_seq = log_first_seq;
- clear_idx = log_first_idx;
+ clear_seq = log_buf.first_seq;
+ clear_idx = log_buf.first_idx;
}
/*
seq = clear_seq;
idx = clear_idx;
prev = 0;
- while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ while (seq < log_buf.next_seq) {
+ struct log *msg = log_from_idx(&log_buf, idx);
len += msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
}
seq = clear_seq;
idx = clear_idx;
prev = 0;
- while (len > size && seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ while (len > size && seq < log_buf.next_seq) {
+ struct log *msg = log_from_idx(&log_buf, idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
}
/* last message fitting into this dump */
- next_seq = log_next_seq;
+ next_seq = log_buf.next_seq;
len = 0;
prev = 0;
while (len >= 0 && seq < next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct log *msg = log_from_idx(&log_buf, idx);
int textlen;
textlen = msg_print_text(msg, prev, true, text,
len = textlen;
break;
}
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
prev = msg->flags;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_buf.lock);
if (copy_to_user(buf + len, text, textlen))
len = -EFAULT;
else
len += textlen;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&log_buf.lock);
- if (seq < log_first_seq) {
+ if (seq < log_buf.first_seq) {
/* messages are gone, move to next one */
- seq = log_first_seq;
- idx = log_first_idx;
+ seq = log_buf.first_seq;
+ idx = log_buf.first_idx;
prev = 0;
}
}
}
if (clear) {
- clear_seq = log_next_seq;
- clear_idx = log_next_idx;
+ clear_seq = log_buf.next_seq;
+ clear_idx = log_buf.next_idx;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_buf.lock);
kfree(text);
return len;
error = -EFAULT;
goto out;
}
- error = wait_event_interruptible(log_wait,
- syslog_seq != log_next_seq);
+ error = wait_event_interruptible(log_buf.wait,
+ syslog_seq != log_buf.next_seq);
if (error)
goto out;
error = syslog_print(buf, len);
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
- raw_spin_lock_irq(&logbuf_lock);
- if (syslog_seq < log_first_seq) {
+ raw_spin_lock_irq(&log_buf.lock);
+ if (syslog_seq < log_buf.first_seq) {
/* messages are gone, move to first one */
- syslog_seq = log_first_seq;
- syslog_idx = log_first_idx;
+ syslog_seq = log_buf.first_seq;
+ syslog_idx = log_buf.first_idx;
syslog_prev = 0;
syslog_partial = 0;
}
* for pending data, not the size; return the count of
* records, not the length.
*/
- error = log_next_idx - syslog_idx;
+ error = log_buf.next_idx - syslog_idx;
} else {
u64 seq = syslog_seq;
u32 idx = syslog_idx;
enum log_flags prev = syslog_prev;
error = 0;
- while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ while (seq < log_buf.next_seq) {
+ struct log *msg = log_from_idx(&log_buf, idx);
error += msg_print_text(msg, prev, true, NULL, 0);
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
prev = msg->flags;
}
error -= syslog_partial;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&log_buf.lock);
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
- error = log_buf_len;
+ error = log_buf.len;
break;
default:
error = -EINVAL;
debug_locks_off();
/* If a crash is occurring, make sure we can't deadlock */
- raw_spin_lock_init(&logbuf_lock);
+ raw_spin_lock_init(&log_buf.lock);
/* And make sure that we print immediately */
sema_init(&console_sem, 1);
}
* console_lock held, and 'console_locked' set) if it
* is successful, false otherwise.
*
- * This gets called with the 'logbuf_lock' spinlock held and
+ * This gets called with the 'log_buf.lock' spinlock held and
- * interrupts disabled. It should return with 'lockbuf_lock'
+ * interrupts disabled. It should return with 'log_buf.lock'
* released but interrupts still disabled.
*/
static int console_trylock_for_printk(unsigned int cpu)
- __releases(&logbuf_lock)
+ __releases(&log_buf.lock)
{
int retval = 0, wake = 0;
}
}
logbuf_cpu = UINT_MAX;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&log_buf.lock);
if (wake)
up(&console_sem);
return retval;
}
lockdep_off();
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock(&log_buf.lock);
logbuf_cpu = this_cpu;
if (recursion_bug) {
"BUG: recent printk recursion!";
recursion_bug = 0;
- printed_len += strlen(recursion_msg);
/* emit KERN_CRIT message */
- log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
- NULL, 0, recursion_msg, printed_len, logbuf_cpu);
+ printed_len += log_store(&log_buf, 0, 2,
+ LOG_PREFIX|LOG_NEWLINE, 0, NULL, 0,
+ recursion_msg, strlen(recursion_msg),
+ this_cpu);
}
- printed_len += log_format_and_store(facility, level, dict, dictlen,
- fmt, args);
+ printed_len += log_format_and_store(&log_buf, facility, level,
+ dict, dictlen, fmt, this_cpu, args);
/*
* Try to acquire and then immediately release the console semaphore.
* The release will print out buffers and wake up /dev/kmsg and syslog()
* users.
*
- * The console_trylock_for_printk() function will release 'logbuf_lock'
+ * The console_trylock_for_printk() function will release 'log_buf.lock'
* regardless of whether it actually gets the console semaphore or not.
*/
if (console_trylock_for_printk(this_cpu))
#define LOG_LINE_MAX 0
#define PREFIX_MAX 0
-#define LOG_LINE_MAX 0
+
+static struct log_buffer log_buf = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(log_buf.lock),
+ .first_seq = 0,
+ .first_idx = 0,
+ .next_seq = 0,
+};
+
static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
static enum log_flags syslog_prev;
-static u64 log_first_seq;
-static u32 log_first_idx;
-static u64 log_next_seq;
static enum log_flags console_prev;
static struct cont {
size_t len;
u8 level;
bool flushed:1;
} cont;
-static struct log *log_from_idx(u32 idx) { return NULL; }
-static u32 log_next(u32 idx) { return 0; }
+static struct log *log_from_idx(struct log_buffer *log_b, u32 idx) { return NULL; }
+static u32 log_next(struct log_buffer *log_b, u32 idx) { return 0; }
static void call_console_drivers(int level, const char *text, size_t len) {}
static size_t msg_print_text(const struct log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size) { return 0; }
unsigned long flags;
size_t len;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
if (!cont.len)
goto out;
* busy. The earlier ones need to be printed before this one, we
* did not flush any fragment so far, so just let it queue up.
*/
- if (console_seq < log_next_seq && !cont.cons)
+ if (console_seq < log_buf.next_seq && !cont.cons)
goto out;
len = cont_print_text(text, size);
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&log_buf.lock);
stop_critical_timings();
call_console_drivers(cont.level, text, len);
start_critical_timings();
local_irq_restore(flags);
return;
out:
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
}
/**
size_t len;
int level;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
- if (seen_seq != log_next_seq) {
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
+ if (seen_seq != log_buf.next_seq) {
wake_klogd = true;
- seen_seq = log_next_seq;
+ seen_seq = log_buf.next_seq;
}
- if (console_seq < log_first_seq) {
+ if (console_seq < log_buf.first_seq) {
/* messages are gone, move to first one */
- console_seq = log_first_seq;
- console_idx = log_first_idx;
+ console_seq = log_buf.first_seq;
+ console_idx = log_buf.first_idx;
console_prev = 0;
}
skip:
- if (console_seq == log_next_seq)
+ if (console_seq == log_buf.next_seq)
break;
- msg = log_from_idx(console_idx);
+ msg = log_from_idx(&log_buf, console_idx);
if (msg->flags & LOG_NOCONS) {
/*
* Skip record we have buffered and already printed
* directly to the console when we received it.
*/
- console_idx = log_next(console_idx);
+ console_idx = log_next(&log_buf, console_idx);
console_seq++;
/*
* We will get here again when we register a new
level = msg->level;
len = msg_print_text(msg, console_prev, false,
text, sizeof(text));
- console_idx = log_next(console_idx);
+ console_idx = log_next(&log_buf, console_idx);
console_seq++;
console_prev = msg->flags;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&log_buf.lock);
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(level, text, len);
if (unlikely(exclusive_console))
exclusive_console = NULL;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&log_buf.lock);
up(&console_sem);
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
- raw_spin_lock(&logbuf_lock);
- retry = console_seq != log_next_seq;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_lock(&log_buf.lock);
+ retry = console_seq != log_buf.next_seq;
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
if (retry && console_trylock())
goto again;
* console_unlock(); will print out the buffered messages
* for us.
*/
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
console_prev = syslog_prev;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
/*
* We're about to replay the log buffer. Only do this to the
* just-registered console to avoid excessive message spam to
}
if (pending & PRINTK_PENDING_WAKEUP)
- wake_up_interruptible(&log_wait);
+ wake_up_interruptible(&log_buf.wait);
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
void wake_up_klogd(void)
{
preempt_disable();
- if (waitqueue_active(&log_wait)) {
+ if (waitqueue_active(&log_buf.wait)) {
this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
}
/* initialize iterator with data about the stored records */
dumper->active = true;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
- dumper->next_seq = log_next_seq;
- dumper->next_idx = log_next_idx;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ dumper->next_seq = log_buf.next_seq;
+ dumper->next_idx = log_buf.next_idx;
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
/* invoke dumper which will iterate over records */
dumper->dump(dumper, reason);
if (!dumper->active)
goto out;
- if (dumper->cur_seq < log_first_seq) {
+ if (dumper->cur_seq < log_buf.first_seq) {
/* messages are gone, move to first available one */
- dumper->cur_seq = log_first_seq;
- dumper->cur_idx = log_first_idx;
+ dumper->cur_seq = log_buf.first_seq;
+ dumper->cur_idx = log_buf.first_idx;
}
/* last entry */
- if (dumper->cur_seq >= log_next_seq)
+ if (dumper->cur_seq >= log_buf.next_seq)
goto out;
- msg = log_from_idx(dumper->cur_idx);
+ msg = log_from_idx(&log_buf, dumper->cur_idx);
l = msg_print_text(msg, 0, syslog, line, size);
- dumper->cur_idx = log_next(dumper->cur_idx);
+ dumper->cur_idx = log_next(&log_buf, dumper->cur_idx);
dumper->cur_seq++;
ret = true;
out:
unsigned long flags;
bool ret;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
return ret;
}
if (!dumper->active)
goto out;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
- if (dumper->cur_seq < log_first_seq) {
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
+ if (dumper->cur_seq < log_buf.first_seq) {
/* messages are gone, move to first available one */
- dumper->cur_seq = log_first_seq;
- dumper->cur_idx = log_first_idx;
+ dumper->cur_seq = log_buf.first_seq;
+ dumper->cur_idx = log_buf.first_idx;
}
/* last entry */
if (dumper->cur_seq >= dumper->next_seq) {
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
goto out;
}
idx = dumper->cur_idx;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct log *msg = log_from_idx(&log_buf, idx);
l += msg_print_text(msg, prev, true, NULL, 0);
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
prev = msg->flags;
}
idx = dumper->cur_idx;
prev = 0;
while (l > size && seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct log *msg = log_from_idx(&log_buf, idx);
l -= msg_print_text(msg, prev, true, NULL, 0);
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
prev = msg->flags;
}
l = 0;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct log *msg = log_from_idx(&log_buf, idx);
l += msg_print_text(msg, prev, syslog, buf + l, size - l);
- idx = log_next(idx);
+ idx = log_next(&log_buf, idx);
seq++;
prev = msg->flags;
}
dumper->next_seq = next_seq;
dumper->next_idx = next_idx;
ret = true;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
out:
if (len)
*len = l;
{
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
- dumper->next_seq = log_next_seq;
- dumper->next_idx = log_next_idx;
+ dumper->next_seq = log_buf.next_seq;
+ dumper->next_idx = log_buf.next_idx;
}
/**
{
unsigned long flags;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&log_buf.lock, flags);
kmsg_dump_rewind_nolock(dumper);
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&log_buf.lock, flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);