1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/printk.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Modified to make sys_syslog() more flexible: added commands to
8 * return the last 4k of kernel messages, regardless of whether
9 * they've been read or not. Added option to suppress kernel printk's
10 * to the console. Added hook for sending the console messages
11 * elsewhere, in preparation for a serial line console (someday).
13 * Modified for sysctl support, 1/8/97, Chris Horn.
14 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
15 * manfred@colorfullife.com
16 * Rewrote bits to get rid of console_lock
17 * 01Mar01 Andrew Morton
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/kernel.h>
24 #include <linux/tty.h>
25 #include <linux/tty_driver.h>
26 #include <linux/console.h>
27 #include <linux/init.h>
28 #include <linux/jiffies.h>
29 #include <linux/nmi.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/delay.h>
33 #include <linux/smp.h>
34 #include <linux/security.h>
35 #include <linux/memblock.h>
36 #include <linux/syscalls.h>
37 #include <linux/crash_core.h>
38 #include <linux/ratelimit.h>
39 #include <linux/kmsg_dump.h>
40 #include <linux/syslog.h>
41 #include <linux/cpu.h>
42 #include <linux/rculist.h>
43 #include <linux/poll.h>
44 #include <linux/irq_work.h>
45 #include <linux/ctype.h>
46 #include <linux/uio.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/debug.h>
49 #include <linux/sched/task_stack.h>
51 #include <linux/uaccess.h>
52 #include <asm/sections.h>
54 #include <trace/events/initcall.h>
55 #define CREATE_TRACE_POINTS
56 #include <trace/events/printk.h>
58 #include "printk_ringbuffer.h"
59 #include "console_cmdline.h"
63 int console_printk[4] = {
64 CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
65 MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
66 CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
67 CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
69 EXPORT_SYMBOL_GPL(console_printk);
71 atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
72 EXPORT_SYMBOL(ignore_console_lock_warning);
75 * Low-level drivers may need this to know whether they can schedule in
76 * their unblank() callback or not, so let's export it.
79 EXPORT_SYMBOL(oops_in_progress);
82 * console_sem protects the console_drivers list, and also
83 * provides serialisation for access to the entire console
86 static DEFINE_SEMAPHORE(console_sem);
87 struct console *console_drivers;
88 EXPORT_SYMBOL_GPL(console_drivers);
91 * The system may need to suppress printk messages under certain
92 * circumstances, e.g. after a kernel panic has occurred.
94 int __read_mostly suppress_printk;
97 * During panic, heavy printk by other CPUs can delay the
98 * panic and risk deadlock on console resources.
100 static int __read_mostly suppress_panic_printk;
102 #ifdef CONFIG_LOCKDEP
103 static struct lockdep_map console_lock_dep_map = {
104 .name = "console_lock"
108 enum devkmsg_log_bits {
109 __DEVKMSG_LOG_BIT_ON = 0,
110 __DEVKMSG_LOG_BIT_OFF,
111 __DEVKMSG_LOG_BIT_LOCK,
114 enum devkmsg_log_masks {
115 DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
116 DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
117 DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
120 /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
121 #define DEVKMSG_LOG_MASK_DEFAULT 0
123 static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
125 static int __control_devkmsg(char *str)
132 len = str_has_prefix(str, "on");
134 devkmsg_log = DEVKMSG_LOG_MASK_ON;
138 len = str_has_prefix(str, "off");
140 devkmsg_log = DEVKMSG_LOG_MASK_OFF;
144 len = str_has_prefix(str, "ratelimit");
146 devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
153 static int __init control_devkmsg(char *str)
155 if (__control_devkmsg(str) < 0) {
156 pr_warn("printk.devkmsg: bad option string '%s'\n", str);
161 * Set sysctl string accordingly:
163 if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
164 strcpy(devkmsg_log_str, "on");
165 else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
166 strcpy(devkmsg_log_str, "off");
167 /* else "ratelimit" which is set by default. */
170 * Sysctl cannot change it anymore. The kernel command line setting of
171 * this parameter is to force the setting to be permanent throughout the
172 * runtime of the system. This is a precautionary measure against userspace
173 * trying to be a smarta** and attempting to change it up on us.
175 devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
179 __setup("printk.devkmsg=", control_devkmsg);
181 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
182 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
183 int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
184 void *buffer, size_t *lenp, loff_t *ppos)
186 char old_str[DEVKMSG_STR_MAX_SIZE];
191 if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
195 strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
198 err = proc_dostring(table, write, buffer, lenp, ppos);
203 err = __control_devkmsg(devkmsg_log_str);
206 * Do not accept an unknown string OR a known string with
209 if (err < 0 || (err + 1 != *lenp)) {
211 /* ... and restore old setting. */
213 strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);
221 #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
224 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
225 * macros instead of functions so that _RET_IP_ contains useful information.
227 #define down_console_sem() do { \
229 mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
232 static int __down_trylock_console_sem(unsigned long ip)
238 * Here and in __up_console_sem() we need to be in safe mode,
239 * because spindump/WARN/etc from under console ->lock will
240 * deadlock in printk()->down_trylock_console_sem() otherwise.
242 printk_safe_enter_irqsave(flags);
243 lock_failed = down_trylock(&console_sem);
244 printk_safe_exit_irqrestore(flags);
248 mutex_acquire(&console_lock_dep_map, 0, 1, ip);
251 #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
253 static void __up_console_sem(unsigned long ip)
257 mutex_release(&console_lock_dep_map, ip);
259 printk_safe_enter_irqsave(flags);
261 printk_safe_exit_irqrestore(flags);
263 #define up_console_sem() __up_console_sem(_RET_IP_)
265 static bool panic_in_progress(void)
267 return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
271 * This is used for debugging the mess that is the VT code by
272 * keeping track of whether we hold the console semaphore. It's
273 * definitely not the perfect debug tool (we don't know if _WE_
274 * hold it and are racing, but it helps track those weird code
275 * paths in the console code where we end up in places I want
276 * locked without the console semaphore held).
278 static int console_locked, console_suspended;
281 * Array of consoles built from command line options (console=)
284 #define MAX_CMDLINECONSOLES 8
286 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
288 static int preferred_console = -1;
289 int console_set_on_cmdline;
290 EXPORT_SYMBOL(console_set_on_cmdline);
292 /* Flag: console code may call schedule() */
293 static int console_may_schedule;
295 enum con_msg_format_flags {
296 MSG_FORMAT_DEFAULT = 0,
297 MSG_FORMAT_SYSLOG = (1 << 0),
300 static int console_msg_format = MSG_FORMAT_DEFAULT;
303 * The printk log buffer consists of a sequenced collection of records, each
304 * containing variable length message text. Every record also contains its
305 * own meta-data (@info).
307 * Every record meta-data carries the timestamp in microseconds, as well as
308 * the standard userspace syslog level and syslog facility. The usual kernel
309 * messages use LOG_KERN; userspace-injected messages always carry a matching
310 * syslog facility, by default LOG_USER. The origin of every message can be
311 * reliably determined that way.
313 * The human readable log message of a record is available in @text, the
314 * length of the message text in @text_len. The stored message is not
317 * Optionally, a record can carry a dictionary of properties (key/value
318 * pairs), to provide userspace with a machine-readable message context.
320 * Examples for well-defined, commonly used property names are:
321 * DEVICE=b12:8 device identifier
325 * +sound:card0 subsystem:devname
326 * SUBSYSTEM=pci driver-core subsystem name
328 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
329 * and values are terminated by a '\0' character.
331 * Example of record values:
332 * record.text_buf = "it's a line" (unterminated)
333 * record.info.seq = 56
334 * record.info.ts_nsec = 36863
335 * record.info.text_len = 11
336 * record.info.facility = 0 (LOG_KERN)
337 * record.info.flags = 0
338 * record.info.level = 3 (LOG_ERR)
339 * record.info.caller_id = 299 (task 299)
340 * record.info.dev_info.subsystem = "pci" (terminated)
341 * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated)
343 * The 'struct printk_info' buffer must never be directly exported to
344 * userspace, it is a kernel-private implementation detail that might
345 * need to be changed in the future, when the requirements change.
347 * /dev/kmsg exports the structured data in the following line format:
348 * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
350 * Users of the export format should ignore possible additional values
351 * separated by ',', and find the message after the ';' character.
353 * The optional key/value pairs are attached as continuation lines starting
354 * with a space character and terminated by a newline. All possible
355 * non-printable characters are escaped in the "\xff" notation.
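/*
 * A sketch of one exported /dev/kmsg record, with made-up values, following
 * the format described above:
 *
 *   6,339,5140900,-;NET: Registered PF_INET6 protocol family
 *    SUBSYSTEM=net
 *    DEVICE=+net:lo
 *
 * Here "6" decodes to facility 0 (LOG_KERN) and level 6, "339" is the
 * sequence number, "5140900" the timestamp in microseconds and "-" marks a
 * non-continuation record; the indented lines are the optional key/value
 * pairs.
 */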
358 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
359 static DEFINE_MUTEX(syslog_lock);
362 DECLARE_WAIT_QUEUE_HEAD(log_wait);
363 /* All 3 protected by @syslog_lock. */
364 /* the next printk record to read by syslog(READ) or /proc/kmsg */
365 static u64 syslog_seq;
366 static size_t syslog_partial;
367 static bool syslog_time;
370 seqcount_latch_t latch;
375 * The next printk record to read after the last 'clear' command. There are
376 * two copies (updated with seqcount_latch) so that reads can locklessly
377 * access a valid value. Writers are synchronized by @syslog_lock.
379 static struct latched_seq clear_seq = {
380 .latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
385 #ifdef CONFIG_PRINTK_CALLER
386 #define PREFIX_MAX 48
388 #define PREFIX_MAX 32
391 /* the maximum size of a formatted record (i.e. with prefix added per line) */
392 #define CONSOLE_LOG_MAX 1024
394 /* the maximum size for a dropped text message */
395 #define DROPPED_TEXT_MAX 64
397 /* the maximum size allowed to be reserved for a record */
398 #define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX)
400 #define LOG_LEVEL(v) ((v) & 0x07)
401 #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
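/*
 * Worked example (illustrative): a prefix value of 30, e.g. from "<30>",
 * decodes as LOG_LEVEL(30) == 6 (30 & 0x07) and LOG_FACILITY(30) == 3
 * (30 >> 3), i.e. an informational message from the LOG_DAEMON facility.
 */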
404 #define LOG_ALIGN __alignof__(unsigned long)
405 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
406 #define LOG_BUF_LEN_MAX (u32)(1 << 31)
407 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
408 static char *log_buf = __log_buf;
409 static u32 log_buf_len = __LOG_BUF_LEN;
412 * Define the average message size. This only affects the number of
413 * descriptors that will be available. Underestimating is better than
414 * overestimating (too many available descriptors is better than not enough).
416 #define PRB_AVGBITS 5 /* 32 character average length */
418 #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
419 #error CONFIG_LOG_BUF_SHIFT value too small.
421 _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
422 PRB_AVGBITS, &__log_buf[0]);
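/*
 * For example, with CONFIG_LOG_BUF_SHIFT=17 (a 128 KiB static text buffer)
 * the ringbuffer above gets 2^(17 - 5) = 4096 descriptors, i.e. it budgets
 * for an average of 32 bytes of message text per record.
 */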
424 static struct printk_ringbuffer printk_rb_dynamic;
426 static struct printk_ringbuffer *prb = &printk_rb_static;
429 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
430 * per_cpu_areas are initialised. This variable is set to true when
431 * it's safe to access per-CPU data.
433 static bool __printk_percpu_data_ready __ro_after_init;
435 bool printk_percpu_data_ready(void)
437 return __printk_percpu_data_ready;
440 /* Must be called under syslog_lock. */
441 static void latched_seq_write(struct latched_seq *ls, u64 val)
443 raw_write_seqcount_latch(&ls->latch);
445 raw_write_seqcount_latch(&ls->latch);
449 /* Can be called from any context. */
450 static u64 latched_seq_read_nolock(struct latched_seq *ls)
457 seq = raw_read_seqcount_latch(&ls->latch);
460 } while (read_seqcount_latch_retry(&ls->latch, seq));
465 /* Return log buffer address */
466 char *log_buf_addr_get(void)
471 /* Return log buffer size */
472 u32 log_buf_len_get(void)
478 * Define how much of the log buffer we could take at maximum. The value
479 * must be greater than two. Note that only half of the buffer is available
480 * when the index points to the middle.
482 #define MAX_LOG_TAKE_PART 4
483 static const char trunc_msg[] = "<truncated>";
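/*
 * Example (assuming the common 128 KiB default buffer): MAX_LOG_TAKE_PART
 * limits a single record to 128 KiB / 4 = 32 KiB of text; longer messages
 * are cut down by truncate_msg() and the "<truncated>" marker is appended.
 */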
485 static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
488 * The message should not take the whole buffer. Otherwise, it might
489 * get removed too soon.
491 u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
493 if (*text_len > max_text_len)
494 *text_len = max_text_len;
496 /* enable the warning message (if there is room) */
497 *trunc_msg_len = strlen(trunc_msg);
498 if (*text_len >= *trunc_msg_len)
499 *text_len -= *trunc_msg_len;
504 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
506 static int syslog_action_restricted(int type)
511 * Unless restricted, we allow "read all" and "get buffer size"
514 return type != SYSLOG_ACTION_READ_ALL &&
515 type != SYSLOG_ACTION_SIZE_BUFFER;
518 static int check_syslog_permissions(int type, int source)
521 * If this is from /proc/kmsg and we've already opened it, then we've
522 * already done the capabilities checks at open time.
524 if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
527 if (syslog_action_restricted(type)) {
528 if (capable(CAP_SYSLOG))
531 * For historical reasons, accept CAP_SYS_ADMIN too, with
534 if (capable(CAP_SYS_ADMIN)) {
535 pr_warn_once("%s (%d): Attempt to access syslog with "
536 "CAP_SYS_ADMIN but no CAP_SYSLOG "
538 current->comm, task_pid_nr(current));
544 return security_syslog(type);
547 static void append_char(char **pp, char *e, char c)
553 static ssize_t info_print_ext_header(char *buf, size_t size,
554 struct printk_info *info)
556 u64 ts_usec = info->ts_nsec;
558 #ifdef CONFIG_PRINTK_CALLER
559 u32 id = info->caller_id;
561 snprintf(caller, sizeof(caller), ",caller=%c%u",
562 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
567 do_div(ts_usec, 1000);
569 return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
570 (info->facility << 3) | info->level, info->seq,
571 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
574 static ssize_t msg_add_ext_text(char *buf, size_t size,
575 const char *text, size_t text_len,
578 char *p = buf, *e = buf + size;
581 /* escape non-printable characters */
582 for (i = 0; i < text_len; i++) {
583 unsigned char c = text[i];
585 if (c < ' ' || c >= 127 || c == '\\')
586 p += scnprintf(p, e - p, "\\x%02x", c);
588 append_char(&p, e, c);
590 append_char(&p, e, endc);
595 static ssize_t msg_add_dict_text(char *buf, size_t size,
596 const char *key, const char *val)
598 size_t val_len = strlen(val);
604 len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */
605 len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
606 len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
611 static ssize_t msg_print_ext_body(char *buf, size_t size,
612 char *text, size_t text_len,
613 struct dev_printk_info *dev_info)
617 len = msg_add_ext_text(buf, size, text, text_len, '\n');
622 len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
623 dev_info->subsystem);
624 len += msg_add_dict_text(buf + len, size - len, "DEVICE",
630 /* /dev/kmsg - userspace message inject/listen interface */
631 struct devkmsg_user {
633 struct ratelimit_state rs;
635 char buf[CONSOLE_EXT_LOG_MAX];
637 struct printk_info info;
638 char text_buf[CONSOLE_EXT_LOG_MAX];
639 struct printk_record record;
642 static __printf(3, 4) __cold
643 int devkmsg_emit(int facility, int level, const char *fmt, ...)
649 r = vprintk_emit(facility, level, NULL, fmt, args);
655 static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
658 int level = default_message_loglevel;
659 int facility = 1; /* LOG_USER */
660 struct file *file = iocb->ki_filp;
661 struct devkmsg_user *user = file->private_data;
662 size_t len = iov_iter_count(from);
665 if (!user || len > LOG_LINE_MAX)
668 /* Ignore when user logging is disabled. */
669 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
672 /* Ratelimit when not explicitly enabled. */
673 if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
674 if (!___ratelimit(&user->rs, current->comm))
678 buf = kmalloc(len+1, GFP_KERNEL);
683 if (!copy_from_iter_full(buf, len, from)) {
689 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
690 * the decimal value represents a 32-bit quantity: the lower 3 bits are
691 * the log level, the rest is the log facility.
693 * If no prefix or no userspace facility is specified, we
694 * enforce LOG_USER, to be able to reliably distinguish
695 * kernel-generated messages from userspace-injected ones.
698 if (line[0] == '<') {
702 u = simple_strtoul(line + 1, &endp, 10);
703 if (endp && endp[0] == '>') {
704 level = LOG_LEVEL(u);
705 if (LOG_FACILITY(u) != 0)
706 facility = LOG_FACILITY(u);
712 devkmsg_emit(facility, level, "%s", line);
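/*
 * Illustrative userspace injection (hypothetical message text):
 *
 *   $ echo "<11>myapp: write failed" > /dev/kmsg
 *
 * stores a record with level 3 (11 & 0x07) and facility 1 (11 >> 3,
 * LOG_USER); a write without a "<N>" prefix is stored with the default
 * message loglevel and LOG_USER.
 */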
717 static ssize_t devkmsg_read(struct file *file, char __user *buf,
718 size_t count, loff_t *ppos)
720 struct devkmsg_user *user = file->private_data;
721 struct printk_record *r = &user->record;
728 ret = mutex_lock_interruptible(&user->lock);
732 if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
733 if (file->f_flags & O_NONBLOCK) {
739 * Guarantee this task is visible on the waitqueue before
740 * checking the wake condition.
742 * The full memory barrier within set_current_state() of
743 * prepare_to_wait_event() pairs with the full memory barrier
744 * within wq_has_sleeper().
746 * This pairs with __wake_up_klogd:A.
748 ret = wait_event_interruptible(log_wait,
750 atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */
755 if (r->info->seq != atomic64_read(&user->seq)) {
756 /* our last seen message is gone, return error and reset */
757 atomic64_set(&user->seq, r->info->seq);
762 len = info_print_ext_header(user->buf, sizeof(user->buf), r->info);
763 len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
764 &r->text_buf[0], r->info->text_len,
767 atomic64_set(&user->seq, r->info->seq + 1);
774 if (copy_to_user(buf, user->buf, len)) {
780 mutex_unlock(&user->lock);
785 * Be careful when modifying this function!!!
787 * Only a few operations are supported because the device works only with
788 * entire variable-length messages (records). Non-standard values are
789 * returned in the other cases, and it has been this way for quite some time.
790 * User space applications might depend on this behavior.
792 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
794 struct devkmsg_user *user = file->private_data;
804 /* the first record */
805 atomic64_set(&user->seq, prb_first_valid_seq(prb));
809 * The first record after the last SYSLOG_ACTION_CLEAR,
810 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
811 * changes no global state, and does not clear anything.
813 atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
816 /* after the last record */
817 atomic64_set(&user->seq, prb_next_seq(prb));
825 static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
827 struct devkmsg_user *user = file->private_data;
828 struct printk_info info;
832 return EPOLLERR|EPOLLNVAL;
834 poll_wait(file, &log_wait, wait);
836 if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
837 /* return error when data has vanished underneath us */
838 if (info.seq != atomic64_read(&user->seq))
839 ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
841 ret = EPOLLIN|EPOLLRDNORM;
847 static int devkmsg_open(struct inode *inode, struct file *file)
849 struct devkmsg_user *user;
852 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
855 /* write-only does not need any file context */
856 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
857 err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
863 user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
867 ratelimit_default_init(&user->rs);
868 ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
870 mutex_init(&user->lock);
872 prb_rec_init_rd(&user->record, &user->info,
873 &user->text_buf[0], sizeof(user->text_buf));
875 atomic64_set(&user->seq, prb_first_valid_seq(prb));
877 file->private_data = user;
881 static int devkmsg_release(struct inode *inode, struct file *file)
883 struct devkmsg_user *user = file->private_data;
888 ratelimit_state_exit(&user->rs);
890 mutex_destroy(&user->lock);
895 const struct file_operations kmsg_fops = {
896 .open = devkmsg_open,
897 .read = devkmsg_read,
898 .write_iter = devkmsg_write,
899 .llseek = devkmsg_llseek,
900 .poll = devkmsg_poll,
901 .release = devkmsg_release,
904 #ifdef CONFIG_CRASH_CORE
906 * This appends the listed symbols to /proc/vmcore
908 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
909 * obtain access to symbols that are otherwise very difficult to locate. These
910 * symbols are specifically used so that utilities can access and extract the
911 * dmesg log from a vmcore file after a crash.
913 void log_buf_vmcoreinfo_setup(void)
915 struct dev_printk_info *dev_info = NULL;
917 VMCOREINFO_SYMBOL(prb);
918 VMCOREINFO_SYMBOL(printk_rb_static);
919 VMCOREINFO_SYMBOL(clear_seq);
922 * Export struct size and field offsets. User space tools can
923 * parse it and detect any changes to structure down the line.
926 VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
927 VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
928 VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
929 VMCOREINFO_OFFSET(printk_ringbuffer, fail);
931 VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
932 VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
933 VMCOREINFO_OFFSET(prb_desc_ring, descs);
934 VMCOREINFO_OFFSET(prb_desc_ring, infos);
935 VMCOREINFO_OFFSET(prb_desc_ring, head_id);
936 VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
938 VMCOREINFO_STRUCT_SIZE(prb_desc);
939 VMCOREINFO_OFFSET(prb_desc, state_var);
940 VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
942 VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
943 VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
944 VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
946 VMCOREINFO_STRUCT_SIZE(printk_info);
947 VMCOREINFO_OFFSET(printk_info, seq);
948 VMCOREINFO_OFFSET(printk_info, ts_nsec);
949 VMCOREINFO_OFFSET(printk_info, text_len);
950 VMCOREINFO_OFFSET(printk_info, caller_id);
951 VMCOREINFO_OFFSET(printk_info, dev_info);
953 VMCOREINFO_STRUCT_SIZE(dev_printk_info);
954 VMCOREINFO_OFFSET(dev_printk_info, subsystem);
955 VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
956 VMCOREINFO_OFFSET(dev_printk_info, device);
957 VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
959 VMCOREINFO_STRUCT_SIZE(prb_data_ring);
960 VMCOREINFO_OFFSET(prb_data_ring, size_bits);
961 VMCOREINFO_OFFSET(prb_data_ring, data);
962 VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
963 VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
965 VMCOREINFO_SIZE(atomic_long_t);
966 VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
968 VMCOREINFO_STRUCT_SIZE(latched_seq);
969 VMCOREINFO_OFFSET(latched_seq, val);
973 /* requested log_buf_len from kernel cmdline */
974 static unsigned long __initdata new_log_buf_len;
976 /* we practice scaling the ring buffer by powers of 2 */
977 static void __init log_buf_len_update(u64 size)
979 if (size > (u64)LOG_BUF_LEN_MAX) {
980 size = (u64)LOG_BUF_LEN_MAX;
981 pr_err("log_buf over 2G is not supported.\n");
985 size = roundup_pow_of_two(size);
986 if (size > log_buf_len)
987 new_log_buf_len = (unsigned long)size;
990 /* save requested log_buf_len since it's too early to process it */
991 static int __init log_buf_len_setup(char *str)
998 size = memparse(str, &str);
1000 log_buf_len_update(size);
1004 early_param("log_buf_len", log_buf_len_setup);
1007 #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1009 static void __init log_buf_add_cpu(void)
1011 unsigned int cpu_extra;
1014 * archs should set up cpu_possible_bits properly with
1015 * set_cpu_possible() after setup_arch() but just in
1016 * case let's ensure this is valid.
1018 if (num_possible_cpus() == 1)
1021 cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1023 /* by default this only takes effect on large systems with more than 64 CPUs */
1024 if (cpu_extra <= __LOG_BUF_LEN / 2)
1027 pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1028 __LOG_CPU_MAX_BUF_LEN);
1029 pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1031 pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1033 log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1035 #else /* !CONFIG_SMP */
1036 static inline void log_buf_add_cpu(void) {}
1037 #endif /* CONFIG_SMP */
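/*
 * Worked example (assuming CONFIG_LOG_CPU_MAX_BUF_SHIFT=12, i.e. 4 KiB per
 * CPU, and a 128 KiB __LOG_BUF_LEN): on a machine with 128 possible CPUs,
 * cpu_extra = 127 * 4 KiB = 508 KiB, which exceeds __LOG_BUF_LEN / 2, so
 * log_buf_len_update() is asked for 508 KiB + 128 KiB and the buffer is
 * rounded up to 1 MiB.
 */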
1039 static void __init set_percpu_data_ready(void)
1041 __printk_percpu_data_ready = true;
1044 static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
1045 struct printk_record *r)
1047 struct prb_reserved_entry e;
1048 struct printk_record dest_r;
1050 prb_rec_init_wr(&dest_r, r->info->text_len);
1052 if (!prb_reserve(&e, rb, &dest_r))
1055 memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
1056 dest_r.info->text_len = r->info->text_len;
1057 dest_r.info->facility = r->info->facility;
1058 dest_r.info->level = r->info->level;
1059 dest_r.info->flags = r->info->flags;
1060 dest_r.info->ts_nsec = r->info->ts_nsec;
1061 dest_r.info->caller_id = r->info->caller_id;
1062 memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
1064 prb_final_commit(&e);
1066 return prb_record_text_space(&e);
1069 static char setup_text_buf[LOG_LINE_MAX] __initdata;
1071 void __init setup_log_buf(int early)
1073 struct printk_info *new_infos;
1074 unsigned int new_descs_count;
1075 struct prb_desc *new_descs;
1076 struct printk_info info;
1077 struct printk_record r;
1078 unsigned int text_size;
1079 size_t new_descs_size;
1080 size_t new_infos_size;
1081 unsigned long flags;
1087 * Some archs call setup_log_buf() multiple times - first is very
1088 * early, e.g. from setup_arch(), and second - when percpu_areas
1092 set_percpu_data_ready();
1094 if (log_buf != __log_buf)
1097 if (!early && !new_log_buf_len)
1100 if (!new_log_buf_len)
1103 new_descs_count = new_log_buf_len >> PRB_AVGBITS;
1104 if (new_descs_count == 0) {
1105 pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
1109 new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1110 if (unlikely(!new_log_buf)) {
1111 pr_err("log_buf_len: %lu text bytes not available\n",
1116 new_descs_size = new_descs_count * sizeof(struct prb_desc);
1117 new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
1118 if (unlikely(!new_descs)) {
1119 pr_err("log_buf_len: %zu desc bytes not available\n",
1121 goto err_free_log_buf;
1124 new_infos_size = new_descs_count * sizeof(struct printk_info);
1125 new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
1126 if (unlikely(!new_infos)) {
1127 pr_err("log_buf_len: %zu info bytes not available\n",
1129 goto err_free_descs;
1132 prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
1134 prb_init(&printk_rb_dynamic,
1135 new_log_buf, ilog2(new_log_buf_len),
1136 new_descs, ilog2(new_descs_count),
1139 local_irq_save(flags);
1141 log_buf_len = new_log_buf_len;
1142 log_buf = new_log_buf;
1143 new_log_buf_len = 0;
1145 free = __LOG_BUF_LEN;
1146 prb_for_each_record(0, &printk_rb_static, seq, &r) {
1147 text_size = add_to_rb(&printk_rb_dynamic, &r);
1148 if (text_size > free)
1154 prb = &printk_rb_dynamic;
1156 local_irq_restore(flags);
1159 * Copy any remaining messages that might have appeared from
1160 * NMI context after copying but before switching to the
1163 prb_for_each_record(seq, &printk_rb_static, seq, &r) {
1164 text_size = add_to_rb(&printk_rb_dynamic, &r);
1165 if (text_size > free)
1171 if (seq != prb_next_seq(&printk_rb_static)) {
1172 pr_err("dropped %llu messages\n",
1173 prb_next_seq(&printk_rb_static) - seq);
1176 pr_info("log_buf_len: %u bytes\n", log_buf_len);
1177 pr_info("early log buf free: %u(%u%%)\n",
1178 free, (free * 100) / __LOG_BUF_LEN);
1182 memblock_free(new_descs, new_descs_size);
1184 memblock_free(new_log_buf, new_log_buf_len);
1187 static bool __read_mostly ignore_loglevel;
1189 static int __init ignore_loglevel_setup(char *str)
1191 ignore_loglevel = true;
1192 pr_info("debug: ignoring loglevel setting.\n");
1197 early_param("ignore_loglevel", ignore_loglevel_setup);
1198 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1199 MODULE_PARM_DESC(ignore_loglevel,
1200 "ignore loglevel setting (prints all kernel messages to the console)");
1202 static bool suppress_message_printing(int level)
1204 return (level >= console_loglevel && !ignore_loglevel);
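/*
 * Example (assuming the usual CONSOLE_LOGLEVEL_DEFAULT of 7): a KERN_INFO
 * (level 6) message is printed to the console while a KERN_DEBUG (level 7)
 * message is suppressed, unless "ignore_loglevel" was specified.
 */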
1207 #ifdef CONFIG_BOOT_PRINTK_DELAY
1209 static int boot_delay; /* msecs delay after each printk during bootup */
1210 static unsigned long long loops_per_msec; /* based on boot_delay */
1212 static int __init boot_delay_setup(char *str)
1216 lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
1217 loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1219 get_option(&str, &boot_delay);
1220 if (boot_delay > 10 * 1000)
1223 pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1224 "HZ: %d, loops_per_msec: %llu\n",
1225 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1228 early_param("boot_delay", boot_delay_setup);
1230 static void boot_delay_msec(int level)
1232 unsigned long long k;
1233 unsigned long timeout;
1235 if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
1236 || suppress_message_printing(level)) {
1240 k = (unsigned long long)loops_per_msec * boot_delay;
1242 timeout = jiffies + msecs_to_jiffies(boot_delay);
1247 * use (volatile) jiffies to prevent
1248 * compiler reduction; loop termination via jiffies
1249 * is secondary and may or may not happen.
1251 if (time_after(jiffies, timeout))
1253 touch_nmi_watchdog();
1257 static inline void boot_delay_msec(int level)
1262 static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1263 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1265 static size_t print_syslog(unsigned int level, char *buf)
1267 return sprintf(buf, "<%u>", level);
1270 static size_t print_time(u64 ts, char *buf)
1272 unsigned long rem_nsec = do_div(ts, 1000000000);
1274 return sprintf(buf, "[%5lu.%06lu]",
1275 (unsigned long)ts, rem_nsec / 1000);
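/*
 * Example: a timestamp of 5140900123456 ns is printed as "[ 5140.900123]",
 * i.e. whole seconds (width 5) plus the remaining nanoseconds reduced to
 * microseconds.
 */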
1278 #ifdef CONFIG_PRINTK_CALLER
1279 static size_t print_caller(u32 id, char *buf)
1283 snprintf(caller, sizeof(caller), "%c%u",
1284 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1285 return sprintf(buf, "[%6s]", caller);
1288 #define print_caller(id, buf) 0
1291 static size_t info_print_prefix(const struct printk_info *info, bool syslog,
1292 bool time, char *buf)
1297 len = print_syslog((info->facility << 3) | info->level, buf);
1300 len += print_time(info->ts_nsec, buf + len);
1302 len += print_caller(info->caller_id, buf + len);
1304 if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1313 * Prepare the record for printing. The text is shifted within the given
1314 * buffer to avoid a need for another one. The following operations are
1317 * - Add prefix for each line.
1318 * - Drop truncated lines that no longer fit into the buffer.
1319 * - Add the trailing newline that has been removed in vprintk_store().
1320 * - Add a string terminator.
1322 * Since the produced string is always terminated, the maximum possible
1323 * return value is @r->text_buf_size - 1;
1325 * Return: The length of the updated/prepared text, including the added
1326 * prefixes and the newline. The terminator is not counted. The dropped
1327 * line(s) are not counted.
1329 static size_t record_print_text(struct printk_record *r, bool syslog,
1332 size_t text_len = r->info->text_len;
1333 size_t buf_size = r->text_buf_size;
1334 char *text = r->text_buf;
1335 char prefix[PREFIX_MAX];
1336 bool truncated = false;
1343 * If the message was truncated because the buffer was not large
1344 * enough, treat the available text as if it were the full text.
1346 if (text_len > buf_size)
1347 text_len = buf_size;
1349 prefix_len = info_print_prefix(r->info, syslog, time, prefix);
1352 * @text_len: bytes of unprocessed text
1353 * @line_len: bytes of current line _without_ newline
1354 * @text: pointer to beginning of current line
1355 * @len: number of bytes prepared in r->text_buf
1358 next = memchr(text, '\n', text_len);
1360 line_len = next - text;
1362 /* Drop truncated line(s). */
1365 line_len = text_len;
1369 * Truncate the text if there is not enough space to add the
1370 * prefix and a trailing newline and a terminator.
1372 if (len + prefix_len + text_len + 1 + 1 > buf_size) {
1373 /* Drop even the current line if no space. */
1374 if (len + prefix_len + line_len + 1 + 1 > buf_size)
1377 text_len = buf_size - len - prefix_len - 1 - 1;
1381 memmove(text + prefix_len, text, text_len);
1382 memcpy(text, prefix, prefix_len);
1385 * Increment the prepared length to include the text and
1386 * prefix that were just moved+copied. Also increment for the
1387 * newline at the end of this line. If this is the last line,
1388 * there is no newline, but it will be added immediately below.
1390 len += prefix_len + line_len + 1;
1391 if (text_len == line_len) {
1393 * This is the last line. Add the trailing newline
1394 * removed in vprintk_store().
1396 text[prefix_len + line_len] = '\n';
1401 * Advance beyond the added prefix and the related line with
1404 text += prefix_len + line_len + 1;
1407 * The remaining text has only decreased by the line with its
1410 * Note that @text_len can become zero. It happens when @text
1411 * ended with a newline (either due to truncation or the
1412 * original string ending with "\n\n"). The loop is correctly
1413 * repeated and (if not truncated) an empty line with a prefix
1416 text_len -= line_len + 1;
1420 * If a buffer was provided, it will be terminated. Space for the
1421 * string terminator is guaranteed to be available. The terminator is
1422 * not counted in the return value.
1425 r->text_buf[len] = 0;
1430 static size_t get_record_print_text_size(struct printk_info *info,
1431 unsigned int line_count,
1432 bool syslog, bool time)
1434 char prefix[PREFIX_MAX];
1437 prefix_len = info_print_prefix(info, syslog, time, prefix);
1440 * Each line will be preceded with a prefix. The intermediate
1441 * newlines are already within the text, but a final trailing
1442 * newline will be added.
1444 return ((prefix_len * line_count) + info->text_len + 1);
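/*
 * Worked example (illustrative): a record whose text is "foo\nbar"
 * (text_len == 7, line_count == 2) with a 16 byte prefix is budgeted as
 * 2 * 16 + 7 + 1 = 40 bytes, the "+ 1" accounting for the trailing newline
 * added when the record is formatted.
 */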
1448 * Beginning with @start_seq, find the first record where it and all following
1449 * records up to (but not including) @max_seq fit into @size.
1451 * @max_seq is simply an upper bound and does not need to exist. If the caller
1452 * does not require an upper bound, -1 can be used for @max_seq.
1454 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1455 bool syslog, bool time)
1457 struct printk_info info;
1458 unsigned int line_count;
1462 /* Determine the size of the records up to @max_seq. */
1463 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1464 if (info.seq >= max_seq)
1466 len += get_record_print_text_size(&info, line_count, syslog, time);
1470 * Adjust the upper bound for the next loop to avoid subtracting
1471 * lengths that were never added.
1477 * Move first record forward until length fits into the buffer. Ignore
1478 * newest messages that were not counted in the above cycle. Messages
1479 * might appear and get lost in the meantime. This is a best effort
1480 * that prevents an infinite loop that could occur with a retry.
1482 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1483 if (len <= size || info.seq >= max_seq)
1485 len -= get_record_print_text_size(&info, line_count, syslog, time);
1491 /* The caller is responsible for making sure @size is greater than 0. */
1492 static int syslog_print(char __user *buf, int size)
1494 struct printk_info info;
1495 struct printk_record r;
1500 text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
1504 prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
1506 mutex_lock(&syslog_lock);
1509 * Wait for the @syslog_seq record to be available. @syslog_seq may
1510 * change while waiting.
1515 mutex_unlock(&syslog_lock);
1517 * Guarantee this task is visible on the waitqueue before
1518 * checking the wake condition.
1520 * The full memory barrier within set_current_state() of
1521 * prepare_to_wait_event() pairs with the full memory barrier
1522 * within wq_has_sleeper().
1524 * This pairs with __wake_up_klogd:A.
1526 len = wait_event_interruptible(log_wait,
1527 prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1528 mutex_lock(&syslog_lock);
1532 } while (syslog_seq != seq);
1535 * Copy records that fit into the buffer. The above cycle makes sure
1536 * that the first record is always available.
1543 if (!prb_read_valid(prb, syslog_seq, &r))
1546 if (r.info->seq != syslog_seq) {
1547 /* message is gone, move to next valid one */
1548 syslog_seq = r.info->seq;
1553 * To keep reading/counting of a partial line consistent,
1554 * use the printk_time value from the beginning of the line.
1556 if (!syslog_partial)
1557 syslog_time = printk_time;
1559 skip = syslog_partial;
1560 n = record_print_text(&r, true, syslog_time);
1561 if (n - syslog_partial <= size) {
1562 /* message fits into buffer, move forward */
1563 syslog_seq = r.info->seq + 1;
1564 n -= syslog_partial;
1567 /* partial read(), remember position */
1569 syslog_partial += n;
1576 mutex_unlock(&syslog_lock);
1577 err = copy_to_user(buf, text + skip, n);
1578 mutex_lock(&syslog_lock);
1591 mutex_unlock(&syslog_lock);
1596 static int syslog_print_all(char __user *buf, int size, bool clear)
1598 struct printk_info info;
1599 struct printk_record r;
1605 text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
1611 * Find first record that fits, including all following records,
1612 * into the user-provided buffer for this dump.
1614 seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1617 prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
1620 prb_for_each_record(seq, prb, seq, &r) {
1623 textlen = record_print_text(&r, true, time);
1625 if (len + textlen > size) {
1630 if (copy_to_user(buf + len, text, textlen))
1640 mutex_lock(&syslog_lock);
1641 latched_seq_write(&clear_seq, seq);
1642 mutex_unlock(&syslog_lock);
1649 static void syslog_clear(void)
1651 mutex_lock(&syslog_lock);
1652 latched_seq_write(&clear_seq, prb_next_seq(prb));
1653 mutex_unlock(&syslog_lock);
1656 int do_syslog(int type, char __user *buf, int len, int source)
1658 struct printk_info info;
1660 static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1663 error = check_syslog_permissions(type, source);
1668 case SYSLOG_ACTION_CLOSE: /* Close log */
1670 case SYSLOG_ACTION_OPEN: /* Open log */
1672 case SYSLOG_ACTION_READ: /* Read from log */
1673 if (!buf || len < 0)
1677 if (!access_ok(buf, len))
1679 error = syslog_print(buf, len);
1681 /* Read/clear last kernel messages */
1682 case SYSLOG_ACTION_READ_CLEAR:
1685 /* Read last kernel messages */
1686 case SYSLOG_ACTION_READ_ALL:
1687 if (!buf || len < 0)
1691 if (!access_ok(buf, len))
1693 error = syslog_print_all(buf, len, clear);
1695 /* Clear ring buffer */
1696 case SYSLOG_ACTION_CLEAR:
1699 /* Disable logging to console */
1700 case SYSLOG_ACTION_CONSOLE_OFF:
1701 if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1702 saved_console_loglevel = console_loglevel;
1703 console_loglevel = minimum_console_loglevel;
1705 /* Enable logging to console */
1706 case SYSLOG_ACTION_CONSOLE_ON:
1707 if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1708 console_loglevel = saved_console_loglevel;
1709 saved_console_loglevel = LOGLEVEL_DEFAULT;
1712 /* Set level of messages printed to console */
1713 case SYSLOG_ACTION_CONSOLE_LEVEL:
1714 if (len < 1 || len > 8)
1716 if (len < minimum_console_loglevel)
1717 len = minimum_console_loglevel;
1718 console_loglevel = len;
1719 /* Implicitly re-enable logging to console */
1720 saved_console_loglevel = LOGLEVEL_DEFAULT;
1722 /* Number of chars in the log buffer */
1723 case SYSLOG_ACTION_SIZE_UNREAD:
1724 mutex_lock(&syslog_lock);
1725 if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1726 /* No unread messages. */
1727 mutex_unlock(&syslog_lock);
1730 if (info.seq != syslog_seq) {
1731 /* messages are gone, move to first one */
1732 syslog_seq = info.seq;
1735 if (source == SYSLOG_FROM_PROC) {
1737 * Short-cut for poll() on /proc/kmsg, which simply checks
1738 * for pending data, not the size; return the count of
1739 * records, not the length.
1741 error = prb_next_seq(prb) - syslog_seq;
1743 bool time = syslog_partial ? syslog_time : printk_time;
1744 unsigned int line_count;
1747 prb_for_each_info(syslog_seq, prb, seq, &info,
1749 error += get_record_print_text_size(&info, line_count,
1753 error -= syslog_partial;
1755 mutex_unlock(&syslog_lock);
1757 /* Size of the log buffer */
1758 case SYSLOG_ACTION_SIZE_BUFFER:
1759 error = log_buf_len;
1769 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1771 return do_syslog(type, buf, len, SYSLOG_FROM_READER);
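/*
 * Illustrative call sequence: userspace such as dmesg typically issues
 * SYSLOG_ACTION_SIZE_BUFFER to learn log_buf_len and then
 * SYSLOG_ACTION_READ_ALL with a buffer of that size to dump the whole ring
 * buffer; both are allowed without CAP_SYSLOG unless dmesg_restrict is set.
 */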
1775 * Special console_lock variants that help to reduce the risk of soft-lockups.
1776 * They allow console_lock to be passed to another printk() call using a busy wait.
1779 #ifdef CONFIG_LOCKDEP
1780 static struct lockdep_map console_owner_dep_map = {
1781 .name = "console_owner"
1785 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1786 static struct task_struct *console_owner;
1787 static bool console_waiter;
1790 * console_lock_spinning_enable - mark beginning of code where another
1791 * thread might safely busy wait
1793 * This basically converts console_lock into a spinlock. This marks
1794 * the section where the console_lock owner can not sleep, because
1795 * there may be a waiter spinning (like a spinlock). Also it must be
1796 * ready to hand over the lock at the end of the section.
1798 static void console_lock_spinning_enable(void)
1800 raw_spin_lock(&console_owner_lock);
1801 console_owner = current;
1802 raw_spin_unlock(&console_owner_lock);
1804 /* The waiter may spin on us after setting console_owner */
1805 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1809 * console_lock_spinning_disable_and_check - mark end of code where another
1810 * thread was able to busy wait and check if there is a waiter
1812 * This is called at the end of the section where spinning is allowed.
1813 * It has two functions. First, it is a signal that it is no longer
1814 * safe to start busy waiting for the lock. Second, it checks if
1815 * there is a busy waiter and passes the lock rights to her.
1817 * Important: Callers lose the lock if there was a busy waiter.
1818 * They must not touch items synchronized by console_lock
1821 * Return: 1 if the lock rights were passed, 0 otherwise.
1823 static int console_lock_spinning_disable_and_check(void)
1827 raw_spin_lock(&console_owner_lock);
1828 waiter = READ_ONCE(console_waiter);
1829 console_owner = NULL;
1830 raw_spin_unlock(&console_owner_lock);
1833 spin_release(&console_owner_dep_map, _THIS_IP_);
1837 /* The waiter is now free to continue */
1838 WRITE_ONCE(console_waiter, false);
1840 spin_release(&console_owner_dep_map, _THIS_IP_);
1843 * Hand off console_lock to waiter. The waiter will perform
1844 * the up(). After this, the waiter is the console_lock owner.
1846 mutex_release(&console_lock_dep_map, _THIS_IP_);
1851 * console_trylock_spinning - try to get console_lock by busy waiting
1853 * This allows busy waiting for the console_lock when the current
1854 * owner is running in specially marked sections. It means that
1855 * the current owner is running and cannot reschedule until it
1856 * is ready to lose the lock.
1858 * Return: 1 if we got the lock, 0 otherwise
1860 static int console_trylock_spinning(void)
1862 struct task_struct *owner = NULL;
1865 unsigned long flags;
1867 if (console_trylock())
1871 * It's unsafe to spin once a panic has begun. If we are the
1872 * panic CPU, we may have already halted the owner of the
1873 * console_sem. If we are not the panic CPU, then we should
1874 * avoid taking console_sem, so the panic CPU has a better
1875 * chance of cleanly acquiring it later.
1877 if (panic_in_progress())
1880 printk_safe_enter_irqsave(flags);
1882 raw_spin_lock(&console_owner_lock);
1883 owner = READ_ONCE(console_owner);
1884 waiter = READ_ONCE(console_waiter);
1885 if (!waiter && owner && owner != current) {
1886 WRITE_ONCE(console_waiter, true);
1889 raw_spin_unlock(&console_owner_lock);
1892 * If there is an active printk() writing to the
1893 * consoles, instead of having it write our data too,
1894 * see if we can offload that load from the active
1895 * printer, and do some printing ourselves.
1896 * Go into a spin only if there isn't already a waiter
1897 * spinning, and there is an active printer, and
1898 * that active printer isn't us (recursive printk?).
1901 printk_safe_exit_irqrestore(flags);
1905 /* We spin waiting for the owner to release us */
1906 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1907 /* Owner will clear console_waiter on hand off */
1908 while (READ_ONCE(console_waiter))
1910 spin_release(&console_owner_dep_map, _THIS_IP_);
1912 printk_safe_exit_irqrestore(flags);
1914 * The owner passed the console lock to us.
1915 * Since we did not spin on console lock, annotate
1916 * this as a trylock. Otherwise lockdep will
1919 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
1925 * Call the specified console driver, asking it to write out the specified
1926 * text and length. If @dropped_text is non-NULL and any records have been
1927 * dropped, a dropped message will be written out first.
1929 static void call_console_driver(struct console *con, const char *text, size_t len,
1934 if (con->dropped && dropped_text) {
1935 dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
1936 "** %lu printk messages dropped **\n",
1939 con->write(con, dropped_text, dropped_len);
1942 con->write(con, text, len);
1946 * Recursion is tracked separately on each CPU. If NMIs are supported, an
1947 * additional NMI context per CPU is also separately tracked. Until per-CPU
1948 * is available, a separate "early tracking" is performed.
1950 static DEFINE_PER_CPU(u8, printk_count);
1951 static u8 printk_count_early;
1952 #ifdef CONFIG_HAVE_NMI
1953 static DEFINE_PER_CPU(u8, printk_count_nmi);
1954 static u8 printk_count_nmi_early;
1958 * Recursion is limited to keep the output sane. printk() should not require
1959 * more than 1 level of recursion (allowing, for example, printk() to trigger
1960 * a WARN), but a higher value is used in case some printk-internal errors
1961 * exist, such as the ringbuffer validation checks failing.
1963 #define PRINTK_MAX_RECURSION 3
1966 * Return a pointer to the dedicated counter for the CPU+context of the
1969 static u8 *__printk_recursion_counter(void)
1971 #ifdef CONFIG_HAVE_NMI
1973 if (printk_percpu_data_ready())
1974 return this_cpu_ptr(&printk_count_nmi);
1975 return &printk_count_nmi_early;
1978 if (printk_percpu_data_ready())
1979 return this_cpu_ptr(&printk_count);
1980 return &printk_count_early;
1984 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
1985 * The caller must check the boolean return value to see if the recursion is
1986 * allowed. On failure, interrupts are not disabled.
1988 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
1989 * that is passed to printk_exit_irqrestore().
1991 #define printk_enter_irqsave(recursion_ptr, flags) \
1993 bool success = true; \
1995 typecheck(u8 *, recursion_ptr); \
1996 local_irq_save(flags); \
1997 (recursion_ptr) = __printk_recursion_counter(); \
1998 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
1999 local_irq_restore(flags); \
2002 (*(recursion_ptr))++; \
2007 /* Exit recursion tracking, restoring interrupts. */
2008 #define printk_exit_irqrestore(recursion_ptr, flags) \
2010 typecheck(u8 *, recursion_ptr); \
2011 (*(recursion_ptr))--; \
2012 local_irq_restore(flags); \
2015 int printk_delay_msec __read_mostly;
2017 static inline void printk_delay(int level)
2019 boot_delay_msec(level);
2021 if (unlikely(printk_delay_msec)) {
2022 int m = printk_delay_msec;
2026 touch_nmi_watchdog();
2031 static inline u32 printk_caller_id(void)
2033 return in_task() ? task_pid_nr(current) :
2034 0x80000000 + smp_processor_id();
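/*
 * Example (illustrative values): a printk() from a task with PID 123 records
 * caller_id 123 and is shown by print_caller() as "T123"; the same printk()
 * from an interrupt on CPU 2 records 0x80000002 and is shown as "C2"
 * (CONFIG_PRINTK_CALLER only).
 */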
2038 * printk_parse_prefix - Parse level and control flags.
2040 * @text: The terminated text message.
2041 * @level: A pointer to the current level value, will be updated.
2042 * @flags: A pointer to the current printk_info flags, will be updated.
2044 * @level may be NULL if the caller is not interested in the parsed value.
2045 * Otherwise the variable pointed to by @level must be set to
2046 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2048 * @flags may be NULL if the caller is not interested in the parsed value.
2049 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2052 * Return: The length of the parsed level and control flags.
2054 u16 printk_parse_prefix(const char *text, int *level,
2055 enum printk_info_flags *flags)
2061 kern_level = printk_get_level(text);
2065 switch (kern_level) {
2067 if (level && *level == LOGLEVEL_DEFAULT)
2068 *level = kern_level - '0';
2070 case 'c': /* KERN_CONT */
2083 static u16 printk_sprint(char *text, u16 size, int facility,
2084 enum printk_info_flags *flags, const char *fmt,
2089 text_len = vscnprintf(text, size, fmt, args);
2091 /* Mark and strip a trailing newline. */
2092 if (text_len && text[text_len - 1] == '\n') {
2094 *flags |= LOG_NEWLINE;
2097 /* Strip log level and control flags. */
2098 if (facility == 0) {
2101 prefix_len = printk_parse_prefix(text, NULL, NULL);
2103 text_len -= prefix_len;
2104 memmove(text, text + prefix_len, text_len);
2108 trace_console_rcuidle(text, text_len);
2114 int vprintk_store(int facility, int level,
2115 const struct dev_printk_info *dev_info,
2116 const char *fmt, va_list args)
2118 struct prb_reserved_entry e;
2119 enum printk_info_flags flags = 0;
2120 struct printk_record r;
2121 unsigned long irqflags;
2122 u16 trunc_msg_len = 0;
2132 if (!printk_enter_irqsave(recursion_ptr, irqflags))
2136 * Since the duration of printk() can vary depending on the message
2137 * and state of the ringbuffer, grab the timestamp now so that it is
2138 * close to the call of printk(). This provides a more deterministic
2139 * timestamp with respect to the caller.
2141 ts_nsec = local_clock();
2143 caller_id = printk_caller_id();
2146 * The sprintf needs to come first since the syslog prefix might be
2147 * passed in as a parameter. An extra byte must be reserved so that
2148 * later the vscnprintf() into the reserved buffer has room for the
2149 * terminating '\0', which is not counted by vsnprintf().
2151 va_copy(args2, args);
2152 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2155 if (reserve_size > LOG_LINE_MAX)
2156 reserve_size = LOG_LINE_MAX;
2158 /* Extract log level or control flags. */
2160 printk_parse_prefix(&prefix_buf[0], &level, &flags);
2162 if (level == LOGLEVEL_DEFAULT)
2163 level = default_message_loglevel;
2166 flags |= LOG_NEWLINE;
2168 if (flags & LOG_CONT) {
2169 prb_rec_init_wr(&r, reserve_size);
2170 if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
2171 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2172 facility, &flags, fmt, args);
2173 r.info->text_len += text_len;
2175 if (flags & LOG_NEWLINE) {
2176 r.info->flags |= LOG_NEWLINE;
2177 prb_final_commit(&e);
2188 * Explicitly initialize the record before every prb_reserve() call.
2189 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2190 * structure when they fail.
2192 prb_rec_init_wr(&r, reserve_size);
2193 if (!prb_reserve(&e, prb, &r)) {
2194 /* truncate the message if it is too long for empty buffer */
2195 truncate_msg(&reserve_size, &trunc_msg_len);
2197 prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2198 if (!prb_reserve(&e, prb, &r))
2203 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2205 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2206 r.info->text_len = text_len + trunc_msg_len;
2207 r.info->facility = facility;
2208 r.info->level = level & 7;
2209 r.info->flags = flags & 0x1f;
2210 r.info->ts_nsec = ts_nsec;
2211 r.info->caller_id = caller_id;
2213 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2215 /* A message without a trailing newline can be continued. */
2216 if (!(flags & LOG_NEWLINE))
2219 prb_final_commit(&e);
2221 ret = text_len + trunc_msg_len;
2223 printk_exit_irqrestore(recursion_ptr, irqflags);
2227 asmlinkage int vprintk_emit(int facility, int level,
2228 const struct dev_printk_info *dev_info,
2229 const char *fmt, va_list args)
2232 bool in_sched = false;
2234 /* Suppress unimportant messages after panic happens */
2235 if (unlikely(suppress_printk))
2238 if (unlikely(suppress_panic_printk) &&
2239 atomic_read(&panic_cpu) != raw_smp_processor_id())
2242 if (level == LOGLEVEL_SCHED) {
2243 level = LOGLEVEL_DEFAULT;
2247 printk_delay(level);
2249 printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2251 /* If called from the scheduler, we can not call up(). */
2254 * The caller may be holding system-critical or
2255 * timing-sensitive locks. Disable preemption during
2256 * printing of all remaining records to all consoles so that
2257 * this context can return as soon as possible. Hopefully
2258 * another printk() caller will take over the printing.
2262 * Try to acquire and then immediately release the console
2263 * semaphore. The release will print out buffers. With the
2264 * spinning variant, this context tries to take over the
2265 * printing from another printing context.
2267 if (console_trylock_spinning())
2275 EXPORT_SYMBOL(vprintk_emit);
2277 int vprintk_default(const char *fmt, va_list args)
2279 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2281 EXPORT_SYMBOL_GPL(vprintk_default);
2283 asmlinkage __visible int _printk(const char *fmt, ...)
2288 va_start(args, fmt);
2289 r = vprintk(fmt, args);
2294 EXPORT_SYMBOL(_printk);
2296 static bool pr_flush(int timeout_ms, bool reset_on_progress);
2297 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2299 #else /* CONFIG_PRINTK */
2301 #define CONSOLE_LOG_MAX 0
2302 #define DROPPED_TEXT_MAX 0
2303 #define printk_time false
2305 #define prb_read_valid(rb, seq, r) false
2306 #define prb_first_valid_seq(rb) 0
2307 #define prb_next_seq(rb) 0
2309 static u64 syslog_seq;
2311 static size_t record_print_text(const struct printk_record *r,
2312 bool syslog, bool time)
2316 static ssize_t info_print_ext_header(char *buf, size_t size,
2317 struct printk_info *info)
2321 static ssize_t msg_print_ext_body(char *buf, size_t size,
2322 char *text, size_t text_len,
2323 struct dev_printk_info *dev_info) { return 0; }
2324 static void console_lock_spinning_enable(void) { }
2325 static int console_lock_spinning_disable_and_check(void) { return 0; }
2326 static void call_console_driver(struct console *con, const char *text, size_t len,
2330 static bool suppress_message_printing(int level) { return false; }
2331 static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
2332 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2334 #endif /* CONFIG_PRINTK */
2336 #ifdef CONFIG_EARLY_PRINTK
2337 struct console *early_console;
2339 asmlinkage __visible void early_printk(const char *fmt, ...)
2349 n = vscnprintf(buf, sizeof(buf), fmt, ap);
2352 early_console->write(early_console, buf, n);
2356 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2358 if (!user_specified)
2362 * @c console was defined by the user on the command line.
2363 * Do not clear the flag when the console is also added by SPCR or the device tree.
2365 c->user_specified = true;
2366 /* At least one console defined by the user on the command line. */
2367 console_set_on_cmdline = 1;
2370 static int __add_preferred_console(char *name, int idx, char *options,
2371 char *brl_options, bool user_specified)
2373 struct console_cmdline *c;
2377 * See if this tty is not yet registered, and
2378 * if we have a slot free.
2380 for (i = 0, c = console_cmdline;
2381 i < MAX_CMDLINECONSOLES && c->name[0];
2383 if (strcmp(c->name, name) == 0 && c->index == idx) {
2385 preferred_console = i;
2386 set_user_specified(c, user_specified);
2390 if (i == MAX_CMDLINECONSOLES)
2393 preferred_console = i;
2394 strlcpy(c->name, name, sizeof(c->name));
2395 c->options = options;
2396 set_user_specified(c, user_specified);
2397 braille_set_options(c, brl_options);
2403 static int __init console_msg_format_setup(char *str)
2405 if (!strcmp(str, "syslog"))
2406 console_msg_format = MSG_FORMAT_SYSLOG;
2407 if (!strcmp(str, "default"))
2408 console_msg_format = MSG_FORMAT_DEFAULT;
2411 __setup("console_msg_format=", console_msg_format_setup);
2414 * Set up a console. Called via do_early_param() in init/main.c
2415 * for each "console=" parameter in the boot command line.
2417 static int __init console_setup(char *str)
2419 char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
2420 char *s, *options, *brl_options = NULL;
2424 * console="" or console=null have been suggested as a way to
2425 * disable console output. Use ttynull that has been created
2426 * for exactly this purpose.
2428 if (str[0] == 0 || strcmp(str, "null") == 0) {
2429 __add_preferred_console("ttynull", 0, NULL, NULL, true);
2433 if (_braille_console_setup(&str, &brl_options))
2437 * Decode str into name, index, options.
2439 if (str[0] >= '0' && str[0] <= '9') {
2440 strcpy(buf, "ttyS");
2441 strncpy(buf + 4, str, sizeof(buf) - 5);
2443 strncpy(buf, str, sizeof(buf) - 1);
2445 buf[sizeof(buf) - 1] = 0;
2446 options = strchr(str, ',');
2450 if (!strcmp(str, "ttya"))
2451 strcpy(buf, "ttyS0");
2452 if (!strcmp(str, "ttyb"))
2453 strcpy(buf, "ttyS1");
2455 for (s = buf; *s; s++)
2456 if (isdigit(*s) || *s == ',')
2458 idx = simple_strtoul(s, NULL, 10);
2461 __add_preferred_console(buf, idx, options, brl_options, true);
2464 __setup("console=", console_setup);
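/*
 * Illustrative examples of "console=" boot parameters handled by
 * console_setup() above; the particular devices and options are only
 * examples, not requirements:
 *
 *	console=tty1		output on virtual terminal 1
 *	console=ttyS0,115200n8	output on serial port 0, options "115200n8"
 *	console=null		disable console output (uses ttynull)
 */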
2467 * add_preferred_console - add a device to the list of preferred consoles.
2468 * @name: device name
2469 * @idx: device index
2470 * @options: options for this console
2472 * The last preferred console added will be used for kernel messages
2473 * and stdin/out/err for init. Normally this is used by console_setup
2474 * above to handle user-supplied console arguments; however it can also
2475 * be used by arch-specific code either to override the user or more
2476 * commonly to provide a default console (i.e. from PROM variables) when
2477 * the user has not supplied one.
2479 int add_preferred_console(char *name, int idx, char *options)
2481 return __add_preferred_console(name, idx, options, NULL, false);
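/*
 * Illustrative sketch: architecture or firmware code may fall back to a
 * default console when the user supplied none. The device name and the
 * options string below are hypothetical.
 *
 *	if (!console_set_on_cmdline)
 *		add_preferred_console("ttyS", 0, "115200n8");
 */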
2484 bool console_suspend_enabled = true;
2485 EXPORT_SYMBOL(console_suspend_enabled);
2487 static int __init console_suspend_disable(char *str)
2489 console_suspend_enabled = false;
2492 __setup("no_console_suspend", console_suspend_disable);
2493 module_param_named(console_suspend, console_suspend_enabled,
2494 bool, S_IRUGO | S_IWUSR);
2495 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2496 " and hibernate operations");
2498 static bool printk_console_no_auto_verbose;
2500 void console_verbose(void)
2502 if (console_loglevel && !printk_console_no_auto_verbose)
2503 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2505 EXPORT_SYMBOL_GPL(console_verbose);
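/*
 * Illustrative sketch: error paths that are about to dump a lot of state
 * may raise the console loglevel first, similar to what panic() does. The
 * message text is hypothetical.
 *
 *	console_verbose();
 *	pr_emerg("controller wedged, dumping state\n");
 *	dump_stack();
 */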
2507 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2508 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2511 * suspend_console - suspend the console subsystem
2513 * This disables printk() while we go into suspend states
2515 void suspend_console(void)
2517 if (!console_suspend_enabled)
2519 pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2520 pr_flush(1000, true);
2522 console_suspended = 1;
2526 void resume_console(void)
2528 if (!console_suspend_enabled)
2531 console_suspended = 0;
2533 pr_flush(1000, true);
2537 * console_cpu_notify - print deferred console messages after CPU hotplug
2540 * If printk() is called from a CPU that is not online yet, the messages
2541 * will be printed on the console only if there are CON_ANYTIME consoles.
2542 * This function is called when a new CPU comes online (or fails to come
2543 * up) or goes offline.
2545 static int console_cpu_notify(unsigned int cpu)
2547 if (!cpuhp_tasks_frozen) {
2548 /* If trylock fails, someone else is doing the printing */
2549 if (console_trylock())
2556 * console_lock - lock the console system for exclusive use.
2558 * Acquires a lock which guarantees that the caller has
2559 * exclusive access to the console system and the console_drivers list.
2561 * Can sleep, returns nothing.
2563 void console_lock(void)
2568 if (console_suspended)
2571 console_may_schedule = 1;
2573 EXPORT_SYMBOL(console_lock);
2576 * console_trylock - try to lock the console system for exclusive use.
2578 * Try to acquire a lock which guarantees that the caller has exclusive
2579 * access to the console system and the console_drivers list.
2581 * returns 1 on success, and 0 on failure to acquire the lock.
2583 int console_trylock(void)
2585 if (down_trylock_console_sem())
2587 if (console_suspended) {
2592 console_may_schedule = 0;
2595 EXPORT_SYMBOL(console_trylock);
2597 int is_console_locked(void)
2599 return console_locked;
2601 EXPORT_SYMBOL(is_console_locked);
2604 * Return true when this CPU should unlock console_sem without pushing all
2605 * messages to the console. This reduces the chance that the console is
2606 * locked when the panic CPU tries to use it.
2608 static bool abandon_console_lock_in_panic(void)
2610 if (!panic_in_progress())
2614 * We can use raw_smp_processor_id() here because it is impossible for
2615 * the task to be migrated to the panic_cpu, or away from it. If
2616 * panic_cpu has already been set, and we're not currently executing on
2617 * that CPU, then we never will be.
2619 return atomic_read(&panic_cpu) != raw_smp_processor_id();
2623 * Check if the given console is currently capable and allowed to print records.
2626 * Requires the console_lock.
2628 static inline bool console_is_usable(struct console *con)
2630 if (!(con->flags & CON_ENABLED))
2637 * Console drivers may assume that per-cpu resources have been
2638 * allocated. So unless they're explicitly marked as being able to
2639 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
2641 if (!cpu_online(raw_smp_processor_id()) &&
2642 !(con->flags & CON_ANYTIME))
2648 static void __console_unlock(void)
2655 * Print one record for the given console. The record printed is the
2656 * next record available for the given console.
2658 * @text is a buffer of size CONSOLE_LOG_MAX.
2660 * If extended messages should be printed, @ext_text is a buffer of size
2661 * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL.
2663 * If dropped messages should be printed, @dropped_text is a buffer of size
2664 * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
2666 * @handover will be set to true if a printk waiter has taken over the
2667 * console_lock, in which case the caller is no longer holding the
2668 * console_lock. Otherwise it is set to false.
2670 * Returns false if the given console has no next record to print, otherwise true.
2673 * Requires the console_lock.
2675 static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
2676 char *dropped_text, bool *handover)
2678 static int panic_console_dropped;
2679 struct printk_info info;
2680 struct printk_record r;
2681 unsigned long flags;
2685 prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
2689 if (!prb_read_valid(prb, con->seq, &r))
2692 if (con->seq != r.info->seq) {
2693 con->dropped += r.info->seq - con->seq;
2694 con->seq = r.info->seq;
2695 if (panic_in_progress() && panic_console_dropped++ > 10) {
2696 suppress_panic_printk = 1;
2697 pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
2701 /* Skip record that has level above the console loglevel. */
2702 if (suppress_message_printing(r.info->level)) {
2708 write_text = ext_text;
2709 len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info);
2710 len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len,
2711 &r.text_buf[0], r.info->text_len, &r.info->dev_info);
2714 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
2718 * While actively printing out messages, if another printk()
2719 * were to occur on another CPU, it may wait for this one to
2720 * finish. This task can not be preempted if there is a
2721 * waiter waiting to take over.
2723 * Interrupts are disabled because the hand over to a waiter
2724 * must not be interrupted until the hand over is completed
2725 * (@console_waiter is cleared).
2727 printk_safe_enter_irqsave(flags);
2728 console_lock_spinning_enable();
2730 stop_critical_timings(); /* don't trace print latency */
2731 call_console_driver(con, write_text, len, dropped_text);
2732 start_critical_timings();
2736 *handover = console_lock_spinning_disable_and_check();
2737 printk_safe_exit_irqrestore(flags);
2743 * Print out all remaining records to all consoles.
2745 * @do_cond_resched is set by the caller. It can be true only in schedulable contexts.
2748 * @next_seq is set to the sequence number after the last available record.
2749 * The value is valid only when this function returns true. It means that all
2750 * usable consoles are completely flushed.
2752 * @handover will be set to true if a printk waiter has taken over the
2753 * console_lock, in which case the caller is no longer holding the
2754 * console_lock. Otherwise it is set to false.
2756 * Returns true when there was at least one usable console and all messages
2757 * were flushed to all usable consoles. A returned false informs the caller
2758 * that everything was not flushed (either there were no usable consoles or
2759 * another context has taken over printing or it is a panic situation and this
2760 * is not the panic CPU). Regardless the reason, the caller should assume it
2761 * is not useful to immediately try again.
2763 * Requires the console_lock.
2765 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
2767 static char dropped_text[DROPPED_TEXT_MAX];
2768 static char ext_text[CONSOLE_EXT_LOG_MAX];
2769 static char text[CONSOLE_LOG_MAX];
2770 bool any_usable = false;
2771 struct console *con;
2778 any_progress = false;
2780 for_each_console(con) {
2783 if (!console_is_usable(con))
2787 if (con->flags & CON_EXTENDED) {
2788 /* Extended consoles do not print "dropped messages". */
2789 progress = console_emit_next_record(con, &text[0],
2793 progress = console_emit_next_record(con, &text[0],
2794 NULL, &dropped_text[0],
2800 /* Track the sequence number following the highest seq flushed. */
2801 if (con->seq > *next_seq)
2802 *next_seq = con->seq;
2806 any_progress = true;
2808 /* Allow panic_cpu to take over the consoles safely. */
2809 if (abandon_console_lock_in_panic())
2812 if (do_cond_resched)
2815 } while (any_progress);
2821 * console_unlock - unlock the console system
2823 * Releases the console_lock which the caller holds on the console system
2824 * and the console driver list.
2826 * While the console_lock was held, console output may have been buffered
2827 * by printk(). If this is the case, console_unlock() emits
2828 * the output prior to releasing the lock.
2830 * console_unlock() may be called from any context.
2832 void console_unlock(void)
2834 bool do_cond_resched;
2839 if (console_suspended) {
2845 * Console drivers are called with interrupts disabled, so
2846 * @console_may_schedule should be cleared before; however, we may
2847 * end up dumping a lot of lines, for example, if called from the
2848 * console registration path, and should invoke cond_resched()
2849 * between lines if allowable. Not doing so can cause a very long
2850 * scheduling stall on a slow console leading to RCU stall and
2851 * softlockup warnings which exacerbate the issue with more
2852 * messages practically incapacitating the system. Therefore, create
2853 * a local to use for the printing loop.
2855 do_cond_resched = console_may_schedule;
2858 console_may_schedule = 0;
2860 flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
2865 * Abort if there was a failure to flush all messages to all
2866 * usable consoles. Either it is not possible to flush (in
2867 * which case it would be an infinite loop of retrying) or
2868 * another context has taken over printing.
2874 * Some context may have added new records after
2875 * console_flush_all() but before unlocking the console.
2876 * Re-check if there is a new record to flush. If the trylock
2877 * fails, another context is already handling the printing.
2879 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
2881 EXPORT_SYMBOL(console_unlock);
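/*
 * Illustrative sketch: code that walks or modifies the registered console
 * list brackets the access with console_lock()/console_unlock(). The
 * message printed below is only an example.
 *
 *	struct console *con;
 *
 *	console_lock();
 *	for_each_console(con)
 *		pr_info("console %s%d registered\n", con->name, con->index);
 *	console_unlock();
 */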
2884 * console_conditional_schedule - yield the CPU if required
2886 * If the console code is currently allowed to sleep, and
2887 * if this CPU should yield the CPU to another task, do so here.
2890 * Must be called while holding the console_lock.
2892 void __sched console_conditional_schedule(void)
2894 if (console_may_schedule)
2897 EXPORT_SYMBOL(console_conditional_schedule);
2899 void console_unblank(void)
2904 * console_unblank can no longer be called in interrupt context unless
2905 * oops_in_progress is set to 1.
2907 if (oops_in_progress) {
2908 if (down_trylock_console_sem() != 0)
2914 console_may_schedule = 0;
2916 if ((c->flags & CON_ENABLED) && c->unblank)
2920 if (!oops_in_progress)
2921 pr_flush(1000, true);
2925 * console_flush_on_panic - flush console content on panic
2926 * @mode: flush all messages in buffer or just the pending ones
2928 * Immediately output all pending messages no matter what.
2930 void console_flush_on_panic(enum con_flush_mode mode)
2933 * If someone else is holding the console lock, trylock will fail
2934 * and may_schedule may be set. Ignore and proceed to unlock so
2935 * that messages are flushed out. As this can be called from any
2936 * context and we don't want to get preempted while flushing,
2937 * ensure may_schedule is cleared.
2940 console_may_schedule = 0;
2942 if (mode == CONSOLE_REPLAY_ALL) {
2946 seq = prb_first_valid_seq(prb);
2954 * Return the console tty driver structure and its associated index
2956 struct tty_driver *console_device(int *index)
2959 struct tty_driver *driver = NULL;
2962 for_each_console(c) {
2965 driver = c->device(c, index);
2974 * Prevent further output on the passed console device so that (for example)
2975 * serial drivers can disable console output before suspending a port, and can
2976 * re-enable output afterwards.
2978 void console_stop(struct console *console)
2980 __pr_flush(console, 1000, true);
2982 console->flags &= ~CON_ENABLED;
2985 EXPORT_SYMBOL(console_stop);
2987 void console_start(struct console *console)
2990 console->flags |= CON_ENABLED;
2992 __pr_flush(console, 1000, true);
2994 EXPORT_SYMBOL(console_start);
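/*
 * Illustrative sketch: a serial driver bracketing a port suspend with
 * console_stop()/console_start(). The "port" structure and its ->cons
 * member stand in for the driver's own bookkeeping here.
 *
 *	console_stop(port->cons);
 *	// power down the uart hardware
 *	console_start(port->cons);
 */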
2996 static int __read_mostly keep_bootcon;
2998 static int __init keep_bootcon_setup(char *str)
3001 pr_info("debug: skip boot console de-registration.\n");
3006 early_param("keep_bootcon", keep_bootcon_setup);
3009 * This is called by register_console() to try to match
3010 * the newly registered console with any of the ones selected
3011 * by either the command line or add_preferred_console(), and to set it up and enable it.
3014 * Care needs to be taken with consoles that are statically
3015 * enabled, such as netconsole.
3017 static int try_enable_preferred_console(struct console *newcon,
3018 bool user_specified)
3020 struct console_cmdline *c;
3023 for (i = 0, c = console_cmdline;
3024 i < MAX_CMDLINECONSOLES && c->name[0];
3026 if (c->user_specified != user_specified)
3028 if (!newcon->match ||
3029 newcon->match(newcon, c->name, c->index, c->options) != 0) {
3030 /* default matching */
3031 BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3032 if (strcmp(c->name, newcon->name) != 0)
3034 if (newcon->index >= 0 &&
3035 newcon->index != c->index)
3037 if (newcon->index < 0)
3038 newcon->index = c->index;
3040 if (_braille_register_console(newcon, c))
3043 if (newcon->setup &&
3044 (err = newcon->setup(newcon, c->options)) != 0)
3047 newcon->flags |= CON_ENABLED;
3048 if (i == preferred_console)
3049 newcon->flags |= CON_CONSDEV;
3054 * Some consoles, such as pstore and netconsole, can be enabled even
3055 * without matching. Accept the pre-enabled consoles only when match()
3056 * and setup() had a chance to be called.
3058 if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3064 /* Try to enable the console unconditionally */
3065 static void try_enable_default_console(struct console *newcon)
3067 if (newcon->index < 0)
3070 if (newcon->setup && newcon->setup(newcon, NULL) != 0)
3073 newcon->flags |= CON_ENABLED;
3076 newcon->flags |= CON_CONSDEV;
3079 #define con_printk(lvl, con, fmt, ...) \
3080 printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
3081 (con->flags & CON_BOOT) ? "boot" : "", \
3082 con->name, con->index, ##__VA_ARGS__)
3085 * The console driver calls this routine during kernel initialization
3086 * to register the console printing procedure with printk() and to
3087 * print any messages that were printed by the kernel before the
3088 * console driver was initialized.
3090 * This can happen pretty early during the boot process (because of
3091 * early_printk) - sometimes before setup_arch() completes - be careful
3092 * of what kernel features are used - they may not be initialised yet.
3094 * There are two types of consoles - bootconsoles (early_printk) and
3095 * "real" consoles (everything which is not a bootconsole) which are
3096 * handled differently.
3097 * - Any number of bootconsoles can be registered at any time.
3098 * - As soon as a "real" console is registered, all bootconsoles
3099 * will be unregistered automatically.
3100 * - Once a "real" console is registered, any attempt to register a
3101 * bootconsole will be rejected.
3103 void register_console(struct console *newcon)
3105 struct console *con;
3106 bool bootcon_enabled = false;
3107 bool realcon_enabled = false;
3110 for_each_console(con) {
3111 if (WARN(con == newcon, "console '%s%d' already registered\n",
3112 con->name, con->index))
3116 for_each_console(con) {
3117 if (con->flags & CON_BOOT)
3118 bootcon_enabled = true;
3120 realcon_enabled = true;
3123 /* Do not register boot consoles when there already is a real one. */
3124 if (newcon->flags & CON_BOOT && realcon_enabled) {
3125 pr_info("Too late to register bootconsole %s%d\n",
3126 newcon->name, newcon->index);
3131 * See if we want to enable this console driver by default.
3133 * Nope when a console is preferred by the command line, device tree, or SPCR.
3136 * The first real console with tty binding (driver) wins. More
3137 * consoles might get enabled before the right one is found.
3139 * Note that a console with tty binding will have CON_CONSDEV
3140 * flag set and will be first in the list.
3142 if (preferred_console < 0) {
3143 if (!console_drivers || !console_drivers->device ||
3144 console_drivers->flags & CON_BOOT) {
3145 try_enable_default_console(newcon);
3149 /* See if this console matches one we selected on the command line */
3150 err = try_enable_preferred_console(newcon, true);
3152 /* If not, try to match against the platform default(s) */
3154 err = try_enable_preferred_console(newcon, false);
3156 /* printk() messages are not printed to the Braille console. */
3157 if (err || newcon->flags & CON_BRL)
3161 * If we have a bootconsole, and are switching to a real console,
3162 * don't print everything out again, since when the boot console and
3163 * the real console are the same physical device, it's annoying to
3164 * see the beginning boot messages twice.
3166 if (bootcon_enabled &&
3167 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
3168 newcon->flags &= ~CON_PRINTBUFFER;
3172 * Put this console in the list - keep the
3173 * preferred driver at the head of the list.
3176 if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
3177 newcon->next = console_drivers;
3178 console_drivers = newcon;
3180 newcon->next->flags &= ~CON_CONSDEV;
3181 /* Ensure this flag is always set for the head of the list */
3182 newcon->flags |= CON_CONSDEV;
3184 newcon->next = console_drivers->next;
3185 console_drivers->next = newcon;
3188 newcon->dropped = 0;
3189 if (newcon->flags & CON_PRINTBUFFER) {
3190 /* Get a consistent copy of @syslog_seq. */
3191 mutex_lock(&syslog_lock);
3192 newcon->seq = syslog_seq;
3193 mutex_unlock(&syslog_lock);
3195 /* Begin with next message. */
3196 newcon->seq = prb_next_seq(prb);
3199 console_sysfs_notify();
3202 * By unregistering the bootconsoles after we enable the real console,
3203 * we get the "console xxx enabled" message on all the consoles -
3204 * boot consoles, real consoles, etc. This is to ensure that end
3205 * users know there might be something in the kernel's log buffer that
3206 * went to the bootconsole (that they do not see on the real console).
3208 con_printk(KERN_INFO, newcon, "enabled\n");
3209 if (bootcon_enabled &&
3210 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
3212 for_each_console(con)
3213 if (con->flags & CON_BOOT)
3214 unregister_console(con);
3217 EXPORT_SYMBOL(register_console);
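/*
 * Illustrative sketch of a minimal driver-provided console; the "mycon"
 * names and the hardware write-out are hypothetical.
 *
 *	static void mycon_write(struct console *con, const char *s,
 *				unsigned int count)
 *	{
 *		// push @count bytes of @s to the hardware
 *	}
 *
 *	static struct console mycon = {
 *		.name	= "mycon",
 *		.write	= mycon_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&mycon);
 */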
3219 int unregister_console(struct console *console)
3221 struct console *con;
3224 con_printk(KERN_INFO, console, "disabled\n");
3226 res = _braille_unregister_console(console);
3234 if (console_drivers == console) {
3235 console_drivers = console->next;
3238 for_each_console(con) {
3239 if (con->next == console) {
3240 con->next = console->next;
3248 goto out_disable_unlock;
3251 * If this isn't the last console and it has CON_CONSDEV set, we
3252 * need to set it on the next preferred console.
3254 if (console_drivers != NULL && console->flags & CON_CONSDEV)
3255 console_drivers->flags |= CON_CONSDEV;
3257 console->flags &= ~CON_ENABLED;
3259 console_sysfs_notify();
3262 res = console->exit(console);
3267 console->flags &= ~CON_ENABLED;
3272 EXPORT_SYMBOL(unregister_console);
3275 * Initialize the console device. This is called *early*, so
3276 * we can't necessarily depend on lots of kernel help here.
3277 * Just do some early initializations, and do the complex setup later.
3280 void __init console_init(void)
3284 initcall_entry_t *ce;
3286 /* Setup the default TTY line discipline. */
3290 * set up the console device so that later boot sequences can
3291 * inform about problems etc.
3293 ce = __con_initcall_start;
3294 trace_initcall_level("console");
3295 while (ce < __con_initcall_end) {
3296 call = initcall_from_entry(ce);
3297 trace_initcall_start(call);
3299 trace_initcall_finish(call, ret);
3305 * Some boot consoles access data that is in the init section and which will
3306 * be discarded after the initcalls have been run. To make sure that no code
3307 * will access this data, unregister the boot consoles in a late initcall.
3309 * If for some reason, such as deferred probe or the driver being a loadable
3310 * module, the real console hasn't registered yet at this point, there will
3311 * be a brief interval in which no messages are logged to the console, which
3312 * makes it difficult to diagnose problems that occur during this time.
3314 * To mitigate this problem somewhat, only unregister consoles whose memory
3315 * intersects with the init section. Note that all other boot consoles will
3316 * get unregistered when the real preferred console is registered.
3318 static int __init printk_late_init(void)
3320 struct console *con;
3323 for_each_console(con) {
3324 if (!(con->flags & CON_BOOT))
3327 /* Check addresses that might be used for enabled consoles. */
3328 if (init_section_intersects(con, sizeof(*con)) ||
3329 init_section_contains(con->write, 0) ||
3330 init_section_contains(con->read, 0) ||
3331 init_section_contains(con->device, 0) ||
3332 init_section_contains(con->unblank, 0) ||
3333 init_section_contains(con->data, 0)) {
3335 * Please consider moving the reported consoles out
3336 * of the init section.
3338 pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
3339 con->name, con->index);
3340 unregister_console(con);
3343 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
3344 console_cpu_notify);
3346 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
3347 console_cpu_notify, NULL);
3349 printk_sysctl_init();
3352 late_initcall(printk_late_init);
3354 #if defined CONFIG_PRINTK
3355 /* If @con is specified, only wait for that console. Otherwise wait for all. */
3356 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
3358 int remaining = timeout_ms;
3367 seq = prb_next_seq(prb);
3374 for_each_console(c) {
3375 if (con && con != c)
3377 if (!console_is_usable(c))
3379 printk_seq = c->seq;
3380 if (printk_seq < seq)
3381 diff += seq - printk_seq;
3385 * If consoles are suspended, it cannot be expected that they
3386 * make forward progress, so time out immediately. @diff is
3387 * still used to return a valid flush status.
3389 if (console_suspended)
3391 else if (diff != last_diff && reset_on_progress)
3392 remaining = timeout_ms;
3396 if (diff == 0 || remaining == 0)
3399 if (remaining < 0) {
3400 /* no timeout limit */
3402 } else if (remaining < 100) {
3417 * pr_flush() - Wait for printing threads to catch up.
3419 * @timeout_ms: The maximum time (in ms) to wait.
3420 * @reset_on_progress: Reset the timeout if forward progress is seen.
3422 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
3423 * represents infinite waiting.
3425 * If @reset_on_progress is true, the timeout will be reset whenever any
3426 * printer has been seen to make some forward progress.
3428 * Context: Process context. May sleep while acquiring console lock.
3429 * Return: true if all enabled printers are caught up.
3431 static bool pr_flush(int timeout_ms, bool reset_on_progress)
3433 return __pr_flush(NULL, timeout_ms, reset_on_progress);
3437 * Delayed printk version, for scheduler-internal messages:
3439 #define PRINTK_PENDING_WAKEUP 0x01
3440 #define PRINTK_PENDING_OUTPUT 0x02
3442 static DEFINE_PER_CPU(int, printk_pending);
3444 static void wake_up_klogd_work_func(struct irq_work *irq_work)
3446 int pending = this_cpu_xchg(printk_pending, 0);
3448 if (pending & PRINTK_PENDING_OUTPUT) {
3449 /* If trylock fails, someone else is doing the printing */
3450 if (console_trylock())
3454 if (pending & PRINTK_PENDING_WAKEUP)
3455 wake_up_interruptible(&log_wait);
3458 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
3459 IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
3461 static void __wake_up_klogd(int val)
3463 if (!printk_percpu_data_ready())
3468 * Guarantee any new records can be seen by tasks preparing to wait
3469 * before this context checks if the wait queue is empty.
3471 * The full memory barrier within wq_has_sleeper() pairs with the full
3472 * memory barrier within set_current_state() of
3473 * prepare_to_wait_event(), which is called after ___wait_event() adds
3474 * the waiter but before it has checked the wait condition.
3476 * This pairs with devkmsg_read:A and syslog_print:A.
3478 if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
3479 (val & PRINTK_PENDING_OUTPUT)) {
3480 this_cpu_or(printk_pending, val);
3481 irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
3486 void wake_up_klogd(void)
3488 __wake_up_klogd(PRINTK_PENDING_WAKEUP);
3491 void defer_console_output(void)
3494 * New messages may have been added directly to the ringbuffer
3495 * using vprintk_store(), so wake any waiters as well.
3497 __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
3500 void printk_trigger_flush(void)
3502 defer_console_output();
3505 int vprintk_deferred(const char *fmt, va_list args)
3509 r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
3510 defer_console_output();
3515 int _printk_deferred(const char *fmt, ...)
3520 va_start(args, fmt);
3521 r = vprintk_deferred(fmt, args);
3528 * printk rate limiting, lifted from the networking subsystem.
3530 * This enforces a rate limit: not more than 10 kernel messages
3531 * every 5s to make a denial-of-service attack impossible.
3533 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
3535 int __printk_ratelimit(const char *func)
3537 return ___ratelimit(&printk_ratelimit_state, func);
3539 EXPORT_SYMBOL(__printk_ratelimit);
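/*
 * Illustrative sketch: callers normally use the printk_ratelimit() or
 * pr_*_ratelimited() wrappers rather than calling __printk_ratelimit()
 * directly. The messages below are hypothetical.
 *
 *	if (printk_ratelimit())
 *		pr_err("request failed: %d\n", err);
 *
 *	pr_warn_ratelimited("dropping oversized packet\n");
 */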
3542 * printk_timed_ratelimit - caller-controlled printk ratelimiting
3543 * @caller_jiffies: pointer to caller's state
3544 * @interval_msecs: minimum interval between prints
3546 * printk_timed_ratelimit() returns true if more than @interval_msecs
3547 * milliseconds have elapsed since the last time printk_timed_ratelimit() returned true.
3550 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
3551 unsigned int interval_msecs)
3553 unsigned long elapsed = jiffies - *caller_jiffies;
3555 if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
3558 *caller_jiffies = jiffies;
3561 EXPORT_SYMBOL(printk_timed_ratelimit);
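/*
 * Illustrative sketch: the caller owns the timestamp, so each call site can
 * be rate limited independently. Here at most one message per second; the
 * variable and message are hypothetical.
 *
 *	static unsigned long last_msg;
 *
 *	if (printk_timed_ratelimit(&last_msg, 1000))
 *		pr_info("still waiting for device\n");
 */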
3563 static DEFINE_SPINLOCK(dump_list_lock);
3564 static LIST_HEAD(dump_list);
3567 * kmsg_dump_register - register a kernel log dumper.
3568 * @dumper: pointer to the kmsg_dumper structure
3570 * Adds a kernel log dumper to the system. The dump callback in the
3571 * structure must be set; it will be called when the kernel oopses or
3572 * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
3574 int kmsg_dump_register(struct kmsg_dumper *dumper)
3576 unsigned long flags;
3579 /* The dump callback needs to be set */
3583 spin_lock_irqsave(&dump_list_lock, flags);
3584 /* Don't allow registering multiple times */
3585 if (!dumper->registered) {
3586 dumper->registered = 1;
3587 list_add_tail_rcu(&dumper->list, &dump_list);
3590 spin_unlock_irqrestore(&dump_list_lock, flags);
3594 EXPORT_SYMBOL_GPL(kmsg_dump_register);
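/*
 * Illustrative sketch: registering a dumper that is only invoked on panic.
 * The callback and structure names are hypothetical; see the
 * kmsg_dump_get_line() example further below for how a dump() callback
 * typically retrieves records.
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    enum kmsg_dump_reason reason)
 *	{
 *		// retrieve and persist records here
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump		= my_dump,
 *		.max_reason	= KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */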
3597 * kmsg_dump_unregister - unregister a kmsg dumper.
3598 * @dumper: pointer to the kmsg_dumper structure
3600 * Removes a dump device from the system. Returns zero on success and
3601 * %-EINVAL otherwise.
3603 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
3605 unsigned long flags;
3608 spin_lock_irqsave(&dump_list_lock, flags);
3609 if (dumper->registered) {
3610 dumper->registered = 0;
3611 list_del_rcu(&dumper->list);
3614 spin_unlock_irqrestore(&dump_list_lock, flags);
3619 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
3621 static bool always_kmsg_dump;
3622 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
3624 const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
3627 case KMSG_DUMP_PANIC:
3629 case KMSG_DUMP_OOPS:
3631 case KMSG_DUMP_EMERG:
3633 case KMSG_DUMP_SHUTDOWN:
3639 EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
3642 * kmsg_dump - dump kernel log to kernel message dumpers.
3643 * @reason: the reason (oops, panic etc) for dumping
3645 * Call each of the registered dumper's dump() callback, which can
3646 * retrieve the kmsg records with kmsg_dump_get_line() or
3647 * kmsg_dump_get_buffer().
3649 void kmsg_dump(enum kmsg_dump_reason reason)
3651 struct kmsg_dumper *dumper;
3654 list_for_each_entry_rcu(dumper, &dump_list, list) {
3655 enum kmsg_dump_reason max_reason = dumper->max_reason;
3658 * If the client has not provided a specific max_reason, default
3659 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
3661 if (max_reason == KMSG_DUMP_UNDEF) {
3662 max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
3665 if (reason > max_reason)
3668 /* invoke dumper which will iterate over records */
3669 dumper->dump(dumper, reason);
3675 * kmsg_dump_get_line - retrieve one kmsg log line
3676 * @iter: kmsg dump iterator
3677 * @syslog: include the "<4>" prefixes
3678 * @line: buffer to copy the line to
3679 * @size: maximum size of the buffer
3680 * @len: length of line placed into buffer
3682 * Start at the beginning of the kmsg buffer, with the oldest kmsg
3683 * record, and copy one record into the provided buffer.
3685 * Consecutive calls will return the next available record moving
3686 * towards the end of the buffer with the youngest messages.
3688 * A return value of FALSE indicates that there are no more records to read.
3691 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
3692 char *line, size_t size, size_t *len)
3694 u64 min_seq = latched_seq_read_nolock(&clear_seq);
3695 struct printk_info info;
3696 unsigned int line_count;
3697 struct printk_record r;
3701 if (iter->cur_seq < min_seq)
3702 iter->cur_seq = min_seq;
3704 prb_rec_init_rd(&r, &info, line, size);
3706 /* Read text or count text lines? */
3708 if (!prb_read_valid(prb, iter->cur_seq, &r))
3710 l = record_print_text(&r, syslog, printk_time);
3712 if (!prb_read_valid_info(prb, iter->cur_seq,
3713 &info, &line_count)) {
3716 l = get_record_print_text_size(&info, line_count, syslog,
3721 iter->cur_seq = r.info->seq + 1;
3728 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
3731 * kmsg_dump_get_buffer - copy kmsg log lines
3732 * @iter: kmsg dump iterator
3733 * @syslog: include the "<4>" prefixes
3734 * @buf: buffer to copy the line to
3735 * @size: maximum size of the buffer
3736 * @len_out: length of line placed into buffer
3738 * Start at the end of the kmsg buffer and fill the provided buffer
3739 * with as many of the *youngest* kmsg records that fit into it.
3740 * If the buffer is large enough, all available kmsg records will be
3741 * copied with a single call.
3743 * Consecutive calls will fill the buffer with the next block of
3744 * available older records, not including the earlier retrieved ones.
3746 * A return value of FALSE indicates that there are no more records to read.
3749 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
3750 char *buf, size_t size, size_t *len_out)
3752 u64 min_seq = latched_seq_read_nolock(&clear_seq);
3753 struct printk_info info;
3754 struct printk_record r;
3759 bool time = printk_time;
3764 if (iter->cur_seq < min_seq)
3765 iter->cur_seq = min_seq;
3767 if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
3768 if (info.seq != iter->cur_seq) {
3769 /* messages are gone, move to first available one */
3770 iter->cur_seq = info.seq;
3775 if (iter->cur_seq >= iter->next_seq)
3779 * Find first record that fits, including all following records,
3780 * into the user-provided buffer for this dump. Pass in size-1
3781 * because this function (by way of record_print_text()) will
3782 * not write more than size-1 bytes of text into @buf.
3784 seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
3785 size - 1, syslog, time);
3788 * The next kmsg_dump_get_buffer() invocation will dump the block of
3789 * older records stored right before this one.
3793 prb_rec_init_rd(&r, &info, buf, size);
3796 prb_for_each_record(seq, prb, seq, &r) {
3797 if (r.info->seq >= iter->next_seq)
3800 len += record_print_text(&r, syslog, time);
3802 /* Adjust record to store to remaining buffer space. */
3803 prb_rec_init_rd(&r, &info, buf + len, size - len);
3806 iter->next_seq = next_seq;
3813 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
3816 * kmsg_dump_rewind - reset the iterator
3817 * @iter: kmsg dump iterator
3819 * Reset the dumper's iterator so that kmsg_dump_get_line() and
3820 * kmsg_dump_get_buffer() can be called again and used multiple
3821 * times within the same dumper.dump() callback.
3823 void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
3825 iter->cur_seq = latched_seq_read_nolock(&clear_seq);
3826 iter->next_seq = prb_next_seq(prb);
3828 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
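/*
 * Illustrative sketch: a dump() callback walking the records line by line.
 * The line buffer size and the persist helper are hypothetical.
 *
 *	struct kmsg_dump_iter iter;
 *	char line[256];
 *	size_t len;
 *
 *	kmsg_dump_rewind(&iter);
 *	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *		persist_record(line, len);
 */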
3833 static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
3834 static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
3837 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
3838 * spinning lock is not owned by any CPU.
3840 * Context: Any context.
3842 void __printk_cpu_sync_wait(void)
3846 } while (atomic_read(&printk_cpu_sync_owner) != -1);
3848 EXPORT_SYMBOL(__printk_cpu_sync_wait);
3851 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
3854 * If no processor has the lock, the calling processor takes the lock and
3855 * becomes the owner. If the calling processor is already the owner of the
3856 * lock, this function succeeds immediately.
3858 * Context: Any context. Expects interrupts to be disabled.
3859 * Return: 1 on success, otherwise 0.
3861 int __printk_cpu_sync_try_get(void)
3866 cpu = smp_processor_id();
3869 * Guarantee loads and stores from this CPU when it is the lock owner
3870 * are _not_ visible to the previous lock owner. This pairs with
3871 * __printk_cpu_sync_put:B.
3873 * Memory barrier involvement:
3875 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3876 * then __printk_cpu_sync_put:A can never read from
3877 * __printk_cpu_sync_try_get:B.
3881 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
3882 * of the previous CPU
3884 * ACQUIRE from __printk_cpu_sync_try_get:A to
3885 * __printk_cpu_sync_try_get:B of this CPU
3887 old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
3888 cpu); /* LMM(__printk_cpu_sync_try_get:A) */
3891 * This CPU is now the owner and begins loading/storing
3892 * data: LMM(__printk_cpu_sync_try_get:B)
3896 } else if (old == cpu) {
3897 /* This CPU is already the owner. */
3898 atomic_inc(&printk_cpu_sync_nested);
3904 EXPORT_SYMBOL(__printk_cpu_sync_try_get);
3907 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
3909 * The calling processor must be the owner of the lock.
3911 * Context: Any context. Expects interrupts to be disabled.
3913 void __printk_cpu_sync_put(void)
3915 if (atomic_read(&printk_cpu_sync_nested)) {
3916 atomic_dec(&printk_cpu_sync_nested);
3921 * This CPU is finished loading/storing data:
3922 * LMM(__printk_cpu_sync_put:A)
3926 * Guarantee loads and stores from this CPU when it was the
3927 * lock owner are visible to the next lock owner. This pairs
3928 * with __printk_cpu_sync_try_get:A.
3930 * Memory barrier involvement:
3932 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3933 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
3937 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
3940 * ACQUIRE from __printk_cpu_sync_try_get:A to
3941 * __printk_cpu_sync_try_get:B of the next CPU
3943 atomic_set_release(&printk_cpu_sync_owner,
3944 -1); /* LMM(__printk_cpu_sync_put:B) */
3946 EXPORT_SYMBOL(__printk_cpu_sync_put);
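/*
 * Illustrative sketch: emitting a related group of lines without them being
 * interleaved with output from other CPUs, assuming the
 * printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() wrapper
 * macros from <linux/printk.h>. The cpu/regs variables are assumed to exist
 * in the caller.
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	pr_emerg("CPU%d stalled, register dump follows\n", cpu);
 *	show_regs(regs);
 *	printk_cpu_sync_put_irqrestore(flags);
 */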
3947 #endif /* CONFIG_SMP */