Apply the Linux 6.6.15-rt22 real-time (PREEMPT_RT) patch set.
This is based on patch-6.6.15-rt22.patch.gz. It enables ARCH_SUPPORTS_RT on
arm, arm64, powerpc, riscv and x86, reworks ARM VFP and DRM (amdgpu, i915)
locking for RT, converts per-CPU and zram locks, adds the lazy-rescheduling
TIF bits, and wires up the NBCON-based 8250 console paths.
Change-Id: Ia5490167dbc54c5c42f7ff46b9df536af528f61a
Signed-off-by: Jaehoon Chung <jh80.chung@samsung.com>
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
+ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
if (user_mode(regs))
goto bad_area;
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
do_bad_area(addr, fsr, regs);
return 0;
}
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
+ * Claim ownership of the VFP unit.
+ *
+ * The caller may change VFP registers until vfp_unlock() is called.
+ *
+ * local_bh_disable() is used to disable preemption and to disable VFP
+ * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
+ * not sufficient because it only serializes soft interrupt related sections
+ * via a local lock, but stays preemptible. Disabling preemption is the right
+ * choice here: on RT kernels bottom half processing always runs in thread
+ * context, so disabling preemption implicitly prevents it as well.
+ */
+static void vfp_lock(void)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
+}
+
+static void vfp_unlock(void)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
+}
+
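For reference, a minimal usage sketch of the pair (hypothetical caller, not
part of the patch); fmxr() is the existing VFP register accessor:

	static void example_set_fpscr(u32 val)
	{
		vfp_lock();	/* BH off on !RT, preemption off on RT */
		fmxr(FPSCR, val);
		vfp_unlock();
	}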
+/*
* Is 'thread's most up to date state stored in this CPUs hardware?
* Must be called from non-preemptible context.
*/
/*
* Process bitmask of exception conditions.
*/
-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
+static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
{
int si_code = 0;
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(FPE_FLTINV, regs);
- return;
+ return FPE_FLTINV;
}
/*
RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
- if (si_code)
- vfp_raise_sigfpe(si_code, regs);
+ return si_code;
}
/*
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
u32 fpscr, orig_fpscr, fpsid, exceptions;
+ int si_code2 = 0;
+ int si_code = 0;
pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
* unallocated VFP instruction but with FPSCR.IXE set and not
* on VFP subarch 1.
*/
- vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
- return;
+ si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
+ goto exit;
}
/*
*/
exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
/*
* If there isn't a second FP instruction, exit now. Note that
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
- return;
+ goto exit;
/*
* The barrier() here prevents fpinst2 being read
emulate:
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+exit:
+ vfp_unlock();
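+ /*
+ * Raising the signals is deferred until after vfp_unlock(): sending a
+ * signal acquires locks which may sleep on PREEMPT_RT and therefore
+ * must not happen with preemption disabled.
+ */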
+ if (si_code2)
+ vfp_raise_sigfpe(si_code2, regs);
+ if (si_code)
+ vfp_raise_sigfpe(si_code, regs);
}
static void vfp_enable(void *unused)
*/
void vfp_sync_hwstate(struct thread_info *thread)
{
- unsigned int cpu = get_cpu();
+ vfp_lock();
- local_bh_disable();
-
- if (vfp_state_in_hw(cpu, thread)) {
+ if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
u32 fpexc = fmrx(FPEXC);
/*
fmxr(FPEXC, fpexc);
}
- local_bh_enable();
- put_cpu();
+ vfp_unlock();
}
/* Ensure that the thread reloads the hardware VFP state on the next use. */
if (!user_mode(regs))
return vfp_kmode_exception(regs, trigger);
- local_bh_disable();
+ vfp_lock();
fpexc = fmrx(FPEXC);
/*
* replay the instruction that trapped.
*/
fmxr(FPEXC, fpexc);
+ vfp_unlock();
} else {
/* Check for synchronous or asynchronous exceptions */
if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
if (!(fpscr & FPSCR_IXE)) {
if (!(fpscr & FPSCR_LENGTH_MASK)) {
pr_debug("not VFP\n");
- local_bh_enable();
+ vfp_unlock();
return -ENOEXEC;
}
fpexc |= FPEXC_DEX;
}
}
bounce: regs->ARM_pc += 4;
+ /* VFP_bounce() will invoke vfp_unlock() */
VFP_bounce(trigger, fpexc, regs);
}
- local_bh_enable();
return 0;
}
unsigned int cpu;
u32 fpexc;
- local_bh_disable();
+ vfp_lock();
/*
* Kernel mode NEON is only allowed outside of hardirq context with
{
/* Disable the NEON/VFP unit. */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- local_bh_enable();
+ vfp_unlock();
}
EXPORT_SYMBOL(kernel_neon_end);
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_RT
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
+ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
select HAVE_RSEQ
select HAVE_SETUP_PER_CPU_AREA if PPC64
select HAVE_SOFTIRQ_ON_OWN_STACK
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary = get_random_canary();
+ unsigned long canary;
+#ifndef CONFIG_PREEMPT_RT
+ canary = get_random_canary();
+#else
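+ /* RT: this can run with preemption disabled during CPU bring-up, where
+ * get_random_canary() may acquire sleeping locks; fall back to a weaker
+ * address-based canary. */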
+ canary = ((unsigned long)&canary) & CANARY_MASK;
+#endif
current->stack_canary = canary;
#ifdef CONFIG_PPC64
get_paca()->canary = canary;
static int __die(const char *str, struct pt_regs *regs, long err)
{
+ const char *pr = "";
+
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
+
printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
PAGE_SIZE / 1024, get_mmu_str(),
- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
+ pr,
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && PPC_E500
+ depends on !PREEMPT_RT
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
config PPC_PSERIES
depends on PPC64 && PPC_BOOK3S
bool "IBM pSeries & new (POWER5-based) iSeries"
+ select GENERIC_ALLOCATOR
select HAVE_PCSPKR_PLATFORM
select MPIC
select OF_DYNAMIC
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
+#include <linux/local_lock.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
return ret;
}
-static DEFINE_PER_CPU(__be64 *, tce_page);
+struct tce_page {
+ __be64 * page;
+ local_lock_t lock;
+};
+static DEFINE_PER_CPU(struct tce_page, tce_page) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
direction, attrs);
}
- local_irq_save(flags); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
+ local_lock_irqsave(&tce_page.lock, flags);
- tcep = __this_cpu_read(tce_page);
+ tcep = __this_cpu_read(tce_page.page);
/* This is safe to do since interrupts are off when we're called
* from iommu_alloc{,_sg}()
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(&tce_page.lock, flags);
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tceshift,
npages, uaddr, direction, attrs);
}
- __this_cpu_write(tce_page, tcep);
+ __this_cpu_write(tce_page.page, tcep);
}
rpn = __pa(uaddr) >> tceshift;
tcenum += limit;
} while (npages > 0 && !rc);
- local_irq_restore(flags);
+ local_unlock_irqrestore(&tce_page.lock, flags);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
DMA_BIDIRECTIONAL, 0);
}
- local_irq_disable(); /* to protect tcep and the page behind it */
- tcep = __this_cpu_read(tce_page);
+ /* to protect tcep and the page behind it */
+ local_lock_irq(&tce_page.lock);
+ tcep = __this_cpu_read(tce_page.page);
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
- local_irq_enable();
+ local_unlock_irq(&tce_page.lock);
return -ENOMEM;
}
- __this_cpu_write(tce_page, tcep);
+ __this_cpu_write(tce_page.page, tcep);
}
proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
/* error cleanup: caller will clear whole range */
- local_irq_enable();
+ local_unlock_irq(&tce_page.lock);
return rc;
}
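The conversion above follows the standard local_lock pattern for per-CPU data
that must also be IRQ-safe; a self-contained sketch of the idiom (hypothetical
names), assuming <linux/local_lock.h>:

	#include <linux/local_lock.h>

	struct pcpu_buf {
		void *data;
		local_lock_t lock;
	};
	static DEFINE_PER_CPU(struct pcpu_buf, pcpu_buf) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void pcpu_buf_use(void)
	{
		unsigned long flags;

		local_lock_irqsave(&pcpu_buf.lock, flags);
		/* __this_cpu_read()/__this_cpu_write() on pcpu_buf.data are
		 * serialized here; on RT this is a per-CPU sleeping lock
		 * rather than a hard IRQ-off section. */
		local_unlock_irqrestore(&pcpu_buf.lock, flags);
	}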
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_RT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_PREEMPT_DYNAMIC_KEY if !XIP_KERNEL
+ select HAVE_PREEMPT_AUTO
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
-void check_unaligned_access(int cpu);
-
#endif
* - pending work-to-be-done flags are in lowest half-word
* - other flags in upper half-word(s)
*/
+#define TIF_ARCH_RESCHED_LAZY 0 /* Lazy rescheduling */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_ARCH_RESCHED_LAZY (1 << TIF_ARCH_RESCHED_LAZY)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
unsigned long elf_hwcap __read_mostly;
return hwcap;
}
-void check_unaligned_access(int cpu)
+static int check_unaligned_access(void *param)
{
+ int cpu = smp_processor_id();
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
- struct page *page;
+ struct page *page = param;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
return;
- page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
- if (!page) {
- pr_warn("Can't alloc pages to measure memcpy performance");
- return;
- }
-
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
/* Unalign src as well, but differently (off by 1 + 2 = 3). */
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
cpu);
- goto out;
+ return 0;
}
if (word_cycles < byte_cycles)
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
per_cpu(misaligned_access_speed, cpu) = speed;
+ return 0;
+}
-out:
- __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+static void check_unaligned_access_nonboot_cpu(void *param)
+{
+ unsigned int cpu = smp_processor_id();
+ struct page **pages = param;
+
+ if (cpu != 0)
+ check_unaligned_access(pages[cpu]);
}
-static int check_unaligned_access_boot_cpu(void)
+static int riscv_online_cpu(unsigned int cpu)
{
- check_unaligned_access(0);
+ static struct page *buf;
+
+ /* We are already set since the last check */
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+ return 0;
+
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+ return 0;
+}
+
+/* Measure unaligned access on all CPUs present at boot in parallel. */
+static int check_unaligned_access_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kcalloc(cpu_count, sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return 0;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+ /* Setup hotplug callback for any new CPUs that come online. */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, NULL);
+
+out:
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
return 0;
}
-arch_initcall(check_unaligned_access_boot_cpu);
+arch_initcall(check_unaligned_access_all_cpus);
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
- check_unaligned_access(curr_cpuid);
if (has_vector()) {
if (riscv_v_setup_vsize())
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
select HAVE_PREEMPT_DYNAMIC_CALL
+ select HAVE_PREEMPT_AUTO
select HAVE_RSEQ
select HAVE_RUST if X86_64
select HAVE_SYSCALL_TRACEPOINTS
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_SSBD 5 /* Speculative store bypass disable */
+#define TIF_ARCH_RESCHED_LAZY 4 /* Lazy rescheduling */
+#define TIF_SINGLESTEP 5 /* reenable singlestep on user return*/
+#define TIF_SSBD 6 /* Speculative store bypass disable */
#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH 10 /* Flush L1D on mm switches (processes) */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_ARCH_RESCHED_LAZY (1 << TIF_ARCH_RESCHED_LAZY)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_SSBD (1 << TIF_SSBD)
#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
*/
static void __cpuidle acpi_safe_halt(void)
{
- if (!tif_need_resched()) {
+ if (!need_resched()) {
raw_safe_halt();
raw_local_irq_disable();
}
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent);
+#ifdef CONFIG_PREEMPT_RT
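+/*
+ * On PREEMPT_RT the ZRAM_LOCK bit spinlock is replaced with a per-entry
+ * spinlock_t: bit_spin_lock() keeps preemption disabled for the whole
+ * critical section, which is not acceptable on RT. The ZRAM_LOCK bit is
+ * still mirrored into ->flags for code that tests it.
+ */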
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
+{
+ size_t index;
+
+ for (index = 0; index < num_pages; index++)
+ spin_lock_init(&zram->table[index].lock);
+}
+
+static int zram_slot_trylock(struct zram *zram, u32 index)
+{
+ int ret;
+
+ ret = spin_trylock(&zram->table[index].lock);
+ if (ret)
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ return ret;
+}
+
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+ spin_lock(&zram->table[index].lock);
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+}
+
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
+}
+
+#else
+
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
+
static int zram_slot_trylock(struct zram *zram, u32 index)
{
return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
+#endif
static inline bool init_done(struct zram *zram)
{
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
+ zram_meta_init_table_locks(zram, num_pages);
return true;
}
unsigned long element;
};
unsigned long flags;
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t lock;
+#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
ktime_t ac_time;
#endif
*/
inline void dc_assert_fp_enabled(void)
{
- int *pcpu, depth = 0;
+ int depth;
- pcpu = get_cpu_ptr(&fpu_recursion_depth);
- depth = *pcpu;
- put_cpu_ptr(&fpu_recursion_depth);
+ depth = __this_cpu_read(fpu_recursion_depth);
ASSERT(depth >= 1);
}
*/
void dc_fpu_begin(const char *function_name, const int line)
{
- int *pcpu;
+ int depth;
- pcpu = get_cpu_ptr(&fpu_recursion_depth);
- *pcpu += 1;
+ WARN_ON_ONCE(!in_task());
+ preempt_disable();
+ depth = __this_cpu_inc_return(fpu_recursion_depth);
- if (*pcpu == 1) {
+ if (depth == 1) {
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
- migrate_disable();
kernel_fpu_begin();
#elif defined(CONFIG_PPC64)
- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
- preempt_disable();
+ if (cpu_has_feature(CPU_FTR_VSX_COMP))
enable_kernel_vsx();
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
- preempt_disable();
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
enable_kernel_altivec();
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
- preempt_disable();
+ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
enable_kernel_fp();
- }
#elif defined(CONFIG_ARM64)
kernel_neon_begin();
#endif
}
- TRACE_DCN_FPU(true, function_name, line, *pcpu);
- put_cpu_ptr(&fpu_recursion_depth);
+ TRACE_DCN_FPU(true, function_name, line, depth);
}
/**
*/
void dc_fpu_end(const char *function_name, const int line)
{
- int *pcpu;
+ int depth;
- pcpu = get_cpu_ptr(&fpu_recursion_depth);
- *pcpu -= 1;
- if (*pcpu <= 0) {
+ depth = __this_cpu_dec_return(fpu_recursion_depth);
+ if (depth == 0) {
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
kernel_fpu_end();
- migrate_enable();
#elif defined(CONFIG_PPC64)
- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ if (cpu_has_feature(CPU_FTR_VSX_COMP))
disable_kernel_vsx();
- preempt_enable();
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
disable_kernel_altivec();
- preempt_enable();
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
disable_kernel_fp();
- preempt_enable();
- }
#elif defined(CONFIG_ARM64)
kernel_neon_end();
#endif
+ } else {
+ WARN_ON_ONCE(depth < 0);
}
- TRACE_DCN_FPU(false, function_name, line, *pcpu);
- put_cpu_ptr(&fpu_recursion_depth);
+ TRACE_DCN_FPU(false, function_name, line, depth);
+ preempt_enable();
}
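For reference, callers bracket FP code with the existing DC_FP_START() and
DC_FP_END() macros, which expand to dc_fpu_begin()/dc_fpu_end(); a minimal
sketch (hypothetical computation):

	DC_FP_START();	/* task context only; preemption now disabled */
	/* ... floating-point DML calculations ... */
	DC_FP_END();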
bool fast_validate)
{
bool voltage_supported;
+ display_e2e_pipe_params_st *pipes;
+
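+ /*
+ * Allocate before DC_FP_START(): the FP section runs with preemption
+ * disabled, where memory allocation (even GFP_ATOMIC on RT) is not
+ * possible.
+ */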
+ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ if (!pipes)
+ return false;
+
DC_FP_START();
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
+
+ kfree(pipes);
return voltage_supported;
}
bool fast_validate)
{
bool voltage_supported;
+ display_e2e_pipe_params_st *pipes;
+
+ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ if (!pipes)
+ return false;
+
DC_FP_START();
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
+
+ kfree(pipes);
return voltage_supported;
}
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
{
bool out = false;
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
out = false;
validate_out:
- kfree(pipes);
BW_VAL_TRACE_FINISH();
return out;
}
-bool dcn20_validate_bandwidth_fp(struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
+bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
ASSERT(context != dc->current_state);
if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true);
+ return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
}
// Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
// Fallback: Try to only support G6 temperature read latency
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
+ memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
&context->bw_ctx.dml, pipes, pipe_cnt);
}
-bool dcn21_validate_bandwidth_fp(struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
{
bool out = false;
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
out = false;
validate_out:
- kfree(pipes);
BW_VAL_TRACE_FINISH();
unsigned int num_states);
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
-bool dcn20_validate_bandwidth_fp(struct dc *dc,
- struct dc_state *context,
- bool fast_validate);
+bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ bool fast_validate, display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
-bool dcn21_validate_bandwidth_fp(struct dc *dc,
- struct dc_state *context,
- bool fast_validate);
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ bool fast_validate, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on X86 && PCI
- depends on !PREEMPT_RT
select INTEL_GTT if X86
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
*/
intel_psr_wait_for_idle_locked(new_crtc_state);
- local_irq_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
break;
}
- local_irq_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_enable();
timeout = schedule_timeout(timeout);
- local_irq_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
}
finish_wait(wq, &wait);
return;
irq_disable:
- local_irq_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
- local_irq_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_enable();
if (intel_vgpu_active(dev_priv))
return;
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
/* Get optional system timestamp before query. */
if (stime)
if (etime)
*etime = ktime_get();
- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
- local_irq_disable();
- signal_irq_work(&b->irq_work);
- local_irq_enable();
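+ /*
+ * Queue the irq_work and wait for it instead of invoking the callback
+ * directly with interrupts disabled; on PREEMPT_RT irq_work may run
+ * from a thread, so the direct IRQ-off invocation is not valid.
+ */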
+ irq_work_queue(&b->irq_work);
cond_resched();
+ irq_work_sync(&b->irq_work);
}
}
* and context switches) submission.
*/
- spin_lock(&sched_engine->lock);
+ spin_lock_irq(&sched_engine->lock);
/*
* If the queue is higher priority than the last
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- spin_unlock(&sched_engine->lock);
+ spin_unlock_irq(&sched_engine->lock);
return;
}
}
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.sched_engine->lock);
- spin_unlock(&engine->sched_engine->lock);
+ spin_unlock_irq(&engine->sched_engine->lock);
return; /* leave this for another sibling */
}
*/
sched_engine->queue_priority_hint = queue_prio(sched_engine);
i915_sched_engine_reset_on_empty(sched_engine);
- spin_unlock(&sched_engine->lock);
+ spin_unlock_irq(&sched_engine->lock);
/*
* We can skip poking the HW if we ended up with exactly the same set
}
}
-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
-{
- local_irq_disable(); /* Suspend interrupts across request submission */
- execlists_dequeue(engine);
- local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
-}
-
static void clear_ports(struct i915_request **ports, int count)
{
memset_p((void **)ports, NULL, count);
}
if (!engine->execlists.pending[0]) {
- execlists_dequeue_irq(engine);
+ execlists_dequeue(engine);
start_timeslice(engine);
}
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
udelay(50);
- err = wait_for_atomic(i915_in_reset(pdev), 50);
+ err = _wait_for_atomic(i915_in_reset(pdev), 50, 0);
/* Clear the reset request. */
pci_write_config_byte(pdev, I915_GDRST, 0);
udelay(50);
if (!err)
- err = wait_for_atomic(!i915_in_reset(pdev), 50);
+ err = _wait_for_atomic(!i915_in_reset(pdev), 50, 0);
return err;
}
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- return wait_for_atomic(g4x_reset_complete(pdev), 50);
+ return _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
}
static int g4x_do_reset(struct intel_gt *gt,
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+ ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
if (ret) {
GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+ ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
if (ret) {
GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
- preempt_disable();
ret = reset(gt, reset_mask, retry);
- preempt_enable();
wa_14015076503_end(gt, reset_mask);
}
{
int err;
unsigned int sleep_period_ms = 1;
- bool not_atomic = !in_atomic() && !irqs_disabled();
+ bool not_atomic = !in_atomic() && !irqs_disabled() && !rcu_preempt_depth();
/*
* FIXME: Have caller pass in if we are in an atomic context to avoid
RQ_TRACE(request, "\n");
- GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->sched_engine->lock);
/*
*/
RQ_TRACE(request, "\n");
- GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->sched_engine->lock);
/*
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
+#ifdef CONFIG_PREEMPT_RT
+#define NOTRACE
+#endif
+
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
TP_ARGS(rq)
);
-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
DEFINE_EVENT(i915_request, i915_request_guc_submit,
TP_PROTO(struct i915_request *rq),
TP_ARGS(rq)
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
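The serial hunks that follow mechanically convert spin_lock_*() on the port
lock to the uart_port_lock_*() wrappers from <linux/serial_core.h>. At this
point the wrappers simply take the port spinlock, but they give the console
infrastructure a single place to hook port-lock acquisition; a sketch of the
assumed semantics:

	static inline void uart_port_lock_irqsave(struct uart_port *up,
						  unsigned long *flags)
	{
		spin_lock_irqsave(&up->lock, *flags);
	}

	static inline void uart_port_unlock_irqrestore(struct uart_port *up,
						       unsigned long flags)
	{
		spin_unlock_irqrestore(&up->lock, flags);
	}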
unsigned long flags;
unsigned int h_lcr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
h_lcr = *CSR_H_UBRLCR;
if (break_state)
h_lcr |= H_UBRLCR_BREAK;
else
h_lcr &= ~H_UBRLCR_BREAK;
*CSR_H_UBRLCR = h_lcr;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int serial21285_startup(struct uart_port *port)
if (port->fifosize)
h_lcr |= H_UBRLCR_FIFO;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
*CSR_H_UBRLCR = h_lcr;
*CSR_UARTCON = 1;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *serial21285_type(struct uart_port *port)
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__aspeed_vuart_set_throttle(up, throttle);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void aspeed_vuart_throttle(struct uart_port *port)
if (iir & UART_IIR_NO_INT)
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
lsr = serial_port_in(port, UART_LSR);
if (interrupts == 0)
return IRQ_NONE;
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
/* Clear all interrupts */
udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
if ((rval | tval) == 0)
dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
return IRQ_HANDLED;
}
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->ier &= ~UART_IER_RDI;
serial_port_out(port, UART_IER, up->ier);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
priv->tx_running = false;
priv->dma.rx_dma = NULL;
struct brcmuart_priv *priv = up->port.private_data;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
priv->shutdown = true;
if (priv->dma_enabled) {
stop_rx_dma(up);
*/
up->dma = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_do_shutdown(port);
}
* interrupt but there is no data ready.
*/
if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_port_in(p, UART_LSR);
if ((status & UART_LSR_DR) == 0) {
handled = 1;
}
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
if (handled)
return 1;
}
if (priv->shutdown)
return HRTIMER_NORESTART;
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_port_in(p, UART_LSR);
/*
status |= UART_MCR_RTS;
serial_port_out(p, UART_MCR, status);
}
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
return HRTIMER_NORESTART;
}
* This will prevent resume from enabling RTS before the
* baud rate has been restored.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
priv->saved_mctrl = port->mctrl;
port->mctrl &= ~TIOCM_RTS;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_suspend_port(priv->line);
clk_disable_unprepare(priv->baud_mux_clk);
if (priv->saved_mctrl & TIOCM_RTS) {
/* Restore RTS */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl |= TIOCM_RTS;
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
return 0;
unsigned int iir, ier = 0, lsr;
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Must disable interrupts or else we risk racing with the interrupt
if (up->port.irq)
serial_out(up, UART_IER, ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/* Standard timer interval plus 0.2s to keep the port running */
mod_timer(&up->timer,
#ifdef CONFIG_SERIAL_8250_CONSOLE
+#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
serial8250_console_write(up, s, count);
}
+#else
+static bool univ8250_console_write_atomic(struct console *co,
+ struct nbcon_write_context *wctxt)
+{
+ struct uart_8250_port *up = &serial8250_ports[co->index];
+
+ return serial8250_console_write_atomic(up, wctxt);
+}
+
+static bool univ8250_console_write_thread(struct console *co,
+ struct nbcon_write_context *wctxt)
+{
+ struct uart_8250_port *up = &serial8250_ports[co->index];
+
+ return serial8250_console_write_thread(up, wctxt);
+}
+
+static void univ8250_console_driver_enter(struct console *con, unsigned long *flags)
+{
+ struct uart_port *up = &serial8250_ports[con->index].port;
+
+ __uart_port_lock_irqsave(up, flags);
+}
+
+static void univ8250_console_driver_exit(struct console *con, unsigned long flags)
+{
+ struct uart_port *up = &serial8250_ports[con->index].port;
+
+ __uart_port_unlock_irqrestore(up, flags);
+}
+#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
static int univ8250_console_setup(struct console *co, char *options)
{
static struct console univ8250_console = {
.name = "ttyS",
+#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
.write = univ8250_console_write,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
+#else
+ .write_atomic = univ8250_console_write_atomic,
+ .write_thread = univ8250_console_write_thread,
+ .driver_enter = univ8250_console_driver_enter,
+ .driver_exit = univ8250_console_driver_exit,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
+#endif
.device = uart_console_device,
.setup = univ8250_console_setup,
.exit = univ8250_console_exit,
.match = univ8250_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
struct uart_port *port = &up->port;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
if (uart->em485) {
unsigned long flags;
- spin_lock_irqsave(&uart->port.lock, flags);
+ uart_port_lock_irqsave(&uart->port, &flags);
serial8250_em485_destroy(uart);
- spin_unlock_irqrestore(&uart->port.lock, flags);
+ uart_port_unlock_irqrestore(&uart->port, flags);
}
uart_remove_one_port(&serial8250_reg, &uart->port);
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
dma->tx_running = 0;
if (ret || !dma->tx_running)
serial8250_set_THRI(p);
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
static void __dma_rx_complete(struct uart_8250_port *p)
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
if (dma->rx_running)
__dma_rx_complete(p);
*/
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
p->dma->rx_dma(p);
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
int serial8250_tx_dma(struct uart_8250_port *p)
* so we limit the workaround only to non-DMA mode.
*/
if (!up->dma && rx_timeout) {
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
(void) p->serial_in(p, UART_RX);
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
}
/* Manually stop the Rx DMA transfer when acting as flow controller */
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
- spin_lock_irqsave(&p->lock, flags);
+ uart_port_lock_irqsave(p, &flags);
status = serial_lsr_in(up);
- spin_unlock_irqrestore(&p->lock, flags);
+ uart_port_unlock_irqrestore(p, flags);
if (status & (UART_LSR_DR | UART_LSR_BI)) {
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_port_out(port, UART_IER, 0);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
return serial8250_do_startup(port);
}
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
iir = port->serial_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
up->lsr_saved_flags &= ~UART_LSR_BI;
port->serial_in(port, UART_RX);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 1;
}
if (data->rx_status == DMA_RX_SHUTDOWN)
return;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
total = dma->rx_size - state.residue;
mtk8250_rx_dma(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void mtk8250_rx_dma(struct uart_8250_port *up)
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
if (uart_console(port))
up->port.cons->cflag = termios->c_cflag;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
* interrupts disabled.
*/
pm_runtime_get_sync(port->dev);
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
/*
* Update the per-port timeout.
}
omap8250_restore_regs(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
pm_runtime_get_sync(port->dev);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
unsigned long delay;
/* Synchronize UART_IER access against the console. */
- spin_lock(&port->lock);
+ uart_port_lock(port);
up->ier = port->serial_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
*/
cancel_delayed_work(&up->overrun_backoff);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
schedule_delayed_work(&up->overrun_backoff, delay);
}
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
#ifdef CONFIG_PM
up->capabilities |= UART_CAP_RPM;
serial_out(up, UART_OMAP_WER, priv->wer);
if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->dma->rx_dma(up);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
enable_irq(up->port.irq);
serial_out(up, UART_OMAP_EFR2, 0x0);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
up->ier = 0;
serial_out(up, UART_IER, 0);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
disable_irq_nosync(up->port.irq);
dev_pm_clear_wake_irq(port->dev);
pm_runtime_get_sync(port->dev);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->ops->stop_rx(port);
priv->throttled = true;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
pm_runtime_get_sync(port->dev);
/* Synchronize UART_IER access against the console. */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
priv->throttled = false;
if (up->dma)
up->dma->rx_dma(up);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
port->read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
unsigned long flags;
/* Synchronize UART_IER access against the console. */
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
/*
* If the tx status is not DMA_COMPLETE, then this is a delayed
*/
if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
DMA_COMPLETE) {
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
return;
}
__dma_rx_do_complete(p);
omap_8250_rx_dma(p);
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
dma->tx_running = 0;
serial8250_set_THRI(p);
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
}
static int omap_8250_tx_dma(struct uart_8250_port *p)
return IRQ_HANDLED;
}
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = serial_port_in(port, UART_LSR);
up = serial8250_get_port(priv->line);
if (up && omap8250_lost_context(up)) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
omap8250_restore_regs(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
omap_8250_rx_dma(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
priv->latency = priv->calc_latency;
if (port->suspended == 0 && port->dev) {
wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl &= ~TIOCM_OUT2;
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
}
writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
if (port->suspended == 0) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl |= TIOCM_OUT2;
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
mutex_unlock(&tport->mutex);
}
if (!p->em485)
return -ENOMEM;
+#ifndef CONFIG_SERIAL_8250_LEGACY_CONSOLE
+ if (uart_console(&p->port))
+ dev_warn(p->port.dev, "no atomic printing for rs485 consoles\n");
+#endif
+
hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
if (p->capabilities & UART_CAP_SLEEP) {
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&p->port.lock);
+ uart_port_lock_irq(&p->port);
if (p->capabilities & UART_CAP_EFR) {
lcr = serial_in(p, UART_LCR);
efr = serial_in(p, UART_EFR);
serial_out(p, UART_EFR, efr);
serial_out(p, UART_LCR, lcr);
}
- spin_unlock_irq(&p->port.lock);
+ uart_port_unlock_irq(&p->port);
}
serial8250_rpm_put(p);
}
-static void serial8250_clear_IER(struct uart_8250_port *up)
+/*
+ * Only to be used by write_atomic() and the legacy write(), which do not
+ * require port lock.
+ */
+static void __serial8250_clear_IER(struct uart_8250_port *up)
{
if (up->capabilities & UART_CAP_UUE)
serial_out(up, UART_IER, UART_IER_UUE);
serial_out(up, UART_IER, 0);
}
+static inline void serial8250_clear_IER(struct uart_8250_port *up)
+{
+ /* Port locked to synchronize UART_IER access against the console. */
+ lockdep_assert_held_once(&up->port.lock);
+
+ __serial8250_clear_IER(up);
+}
+
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
{
if (up->port.type == PORT_RSA) {
if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
__enable_rsa(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
serial_out(up, UART_RSA_FRR, 0);
if (up->port.type == PORT_RSA &&
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
mode = serial_in(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
if (result)
up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
}
#endif /* CONFIG_SERIAL_8250_RSA */
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->capabilities = 0;
up->bugs = 0;
/*
* We failed; there's nothing here
*/
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
scratch2, scratch3);
goto out;
status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
serial8250_out_MCR(up, save_mcr);
if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
DEBUG_AUTOCONF("LOOP test failed (%02x) ",
status1);
goto out;
serial8250_clear_IER(up);
out_unlock:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* Check if the device is a Fintek F81216A
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
save_ier = serial_in(up, UART_IER);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
irqs = probe_irq_on();
UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_out(up, UART_IER, UART_IER_ALL_INTR);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
serial_in(up, UART_LSR);
serial_in(up, UART_RX);
serial_in(up, UART_IIR);
serial8250_out_MCR(up, save_mcr);
/* Synchronize UART_IER access against the console. */
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial_out(up, UART_IER, save_ier);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
unsigned long flags;
serial8250_rpm_get(p);
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
if (em485->active_timer == &em485->stop_tx_timer) {
p->rs485_stop_tx(p);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
serial8250_rpm_put(p);
return HRTIMER_NORESTART;
struct uart_8250_port *p = em485->port;
unsigned long flags;
- spin_lock_irqsave(&p->port.lock, flags);
+ uart_port_lock_irqsave(&p->port, &flags);
if (em485->active_timer == &em485->start_tx_timer) {
__start_tx(&p->port);
em485->active_timer = NULL;
}
- spin_unlock_irqrestore(&p->port.lock, flags);
+ uart_port_unlock_irqrestore(&p->port, flags);
return HRTIMER_NORESTART;
}
if (iir & UART_IIR_NO_INT)
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = serial_lsr_in(up);
if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
struct uart_8250_port *up = up_to_u8250p(port);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
serial8250_tx_chars(up);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
iir = serial_port_in(port, UART_IIR);
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
result = TIOCSER_TEMT;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
unsigned long flags;
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_port_out(port, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
}
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->acr = 0;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
serial_port_out(port, UART_LCR, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
if (port->type == PORT_DA830) {
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
serial_port_out(port, UART_IER, 0);
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
mdelay(10);
/* Enable Tx, Rx and free run mode */
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (port->irqflags & IRQF_SHARED)
enable_irq(port->irq);
*/
serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (up->port.flags & UPF_FOURPORT) {
if (!up->port.irq)
up->port.mctrl |= TIOCM_OUT1;
}
dont_test_tx_en:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* Clear the interrupt registers again for luck, and clear the
*
* Synchronize UART_IER access against the console.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->ier = 0;
serial_port_out(port, UART_IER, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
synchronize_irq(port->irq);
if (up->dma)
serial8250_release_dma(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (port->flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
inb((port->iobase & 0xfe0) | 0x1f);
port->mctrl &= ~TIOCM_OUT2;
serial8250_set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* Disable break condition and FIFOs
quot = serial8250_get_divisor(port, baud, &frac);
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
out_unlock:
* Synchronize UART_IER access against the console.
*/
serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
up->lcr = cval; /* Save computed LCR */
serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
}
serial8250_set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
serial8250_rpm_put(up);
/* Don't rewrite B0 */
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial8250_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
serial8250_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out(port, UART_TX, ch);
+
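+ /* Track whether the last byte ended a line; write_atomic() uses this
+ * to start on a fresh line after taking over mid-message. */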
+ up->console_newline_needed = (ch != '\n');
}
/*
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
+#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
/*
* Print a string to the serial port using the device FIFO
*
touch_nmi_watchdog();
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
- serial8250_clear_IER(up);
+ __serial8250_clear_IER(up);
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
serial8250_modem_status(up);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
+#else
+bool serial8250_console_write_thread(struct uart_8250_port *up,
+ struct nbcon_write_context *wctxt)
+{
+ struct uart_8250_em485 *em485 = up->em485;
+ struct uart_port *port = &up->port;
+ bool done = false;
+ unsigned int ier;
+
+ touch_nmi_watchdog();
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return false;
+
+ /* First save IER then disable the interrupts. */
+ ier = serial_port_in(port, UART_IER);
+ serial8250_clear_IER(up);
+
+ /* Check scratch reg if port powered off during system sleep. */
+ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+ serial8250_console_restore(up);
+ up->canary = 0;
+ }
+
+ if (em485) {
+ if (em485->tx_stopped)
+ up->rs485_start_tx(up);
+ mdelay(port->rs485.delay_rts_before_send);
+ }
+
+ if (nbcon_exit_unsafe(wctxt)) {
+ int len = READ_ONCE(wctxt->len);
+ int i;
+
+ /*
+ * Write out the message. Toggle unsafe for each byte in order
+ * to give another (higher priority) context the opportunity
+ * for a friendly takeover. If such a takeover occurs, this
+ * context must reacquire ownership in order to perform final
+ * actions (such as re-enabling the interrupts).
+ *
+ * IMPORTANT: wctxt->outbuf and wctxt->len are no longer valid
+ * after a reacquire so writing the message must be
+ * aborted.
+ */
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt)) {
+ nbcon_reacquire(wctxt);
+ break;
+ }
+
+ uart_console_write(port, wctxt->outbuf + i, 1, serial8250_console_putchar);
+
+ if (!nbcon_exit_unsafe(wctxt)) {
+ nbcon_reacquire(wctxt);
+ break;
+ }
+ }
+ done = (i == len);
+ } else {
+ nbcon_reacquire(wctxt);
+ }
+
+ while (!nbcon_enter_unsafe(wctxt))
+ nbcon_reacquire(wctxt);
+
+ /* Finally, wait for transmitter to become empty and restore IER. */
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
+ if (em485) {
+ mdelay(port->rs485.delay_rts_after_send);
+ if (em485->tx_stopped)
+ up->rs485_stop_tx(up);
+ }
+ serial_port_out(port, UART_IER, ier);
+
+ /*
+ * The receive handling will happen properly because the receive ready
+ * bit will still be set; it is not cleared on read. However, modem
+ * status handling will not happen automatically; call it explicitly if
+ * anything was saved in msr_saved_flags while interrupts were off.
+ */
+ if (up->msr_saved_flags)
+ serial8250_modem_status(up);
+
+ /* Success if no handover/takeover and message fully printed. */
+ return (nbcon_exit_unsafe(wctxt) && done);
+}
+
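A sketch of the ownership protocol assumed throughout serial8250_console_write_thread(): nbcon_enter_unsafe() opens a region in which a takeover would leave the hardware inconsistent and returns false if console ownership has already been lost, while nbcon_exit_unsafe() closes the region and likewise reports whether ownership survived:

	if (!nbcon_enter_unsafe(wctxt))
		return false;	/* ownership lost before touching hardware */

	/* ... program device registers ... */

	if (!nbcon_exit_unsafe(wctxt))
		return false;	/* a takeover occurred inside the region */

Per the comment in the write loop above, wctxt->outbuf and wctxt->len must be treated as stale after any nbcon_reacquire().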
+bool serial8250_console_write_atomic(struct uart_8250_port *up,
+ struct nbcon_write_context *wctxt)
+{
+ struct uart_port *port = &up->port;
+ unsigned int ier;
+
+ /* Atomic console not supported for rs485 mode. */
+ if (up->em485)
+ return false;
+
+ touch_nmi_watchdog();
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return false;
+
+ /*
+ * First save IER then disable the interrupts. The special variant to
+ * clear IER is used because atomic printing may occur without holding
+ * the port lock.
+ */
+ ier = serial_port_in(port, UART_IER);
+ __serial8250_clear_IER(up);
+
+ /* Check scratch reg if port powered off during system sleep. */
+ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+ serial8250_console_restore(up);
+ up->canary = 0;
+ }
+
+ if (up->console_newline_needed)
+ uart_console_write(port, "\n", 1, serial8250_console_putchar);
+ uart_console_write(port, wctxt->outbuf, wctxt->len, serial8250_console_putchar);
+
+ /* Finally, wait for transmitter to become empty and restore IER. */
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
+ serial_port_out(port, UART_IER, ier);
+
+ /* Success if no handover/takeover. */
+ return nbcon_exit_unsafe(wctxt);
+}
+#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
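The atomic path uses __serial8250_clear_IER() rather than serial8250_clear_IER(). Assuming the split introduced earlier in this series, the locked variant only adds a lockdep assertion that the port lock is held, which the atomic console cannot guarantee; a minimal sketch:

	static void __serial8250_clear_IER(struct uart_8250_port *up)
	{
		if (up->capabilities & UART_CAP_UUE)
			serial_out(up, UART_IER, UART_IER_UUE);
		else
			serial_out(up, UART_IER, 0);
	}

	static inline void serial8250_clear_IER(struct uart_8250_port *up)
	{
		/* Port lock synchronizes UART_IER access against the console. */
		lockdep_assert_held_once(&up->port.lock);

		__serial8250_clear_IER(up);
	}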
static unsigned int probe_baud(struct uart_port *port)
{
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
+ struct uart_8250_port *up = up_to_u8250p(port);
int baud = 9600;
int bits = 8;
int parity = 'n';
if (!port->iobase && !port->membase)
return -ENODEV;
+ up->console_newline_needed = false;
+
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else if (probe)
isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
altera_jtaguart_rx_chars(port);
if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
altera_jtaguart_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_RETVAL(isr);
}
return ret;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Enable RX interrupts now */
port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable all interrupts now */
port->read_status_mask = 0;
writel(port->read_status_mask,
port->membase + ALTERA_JTAGUART_CONTROL_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
unsigned long flags;
u32 status;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (!altera_jtaguart_tx_space(port, &status)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
return; /* no connection activity */
}
cpu_relax();
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
}
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#else
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (!altera_jtaguart_tx_space(port, NULL)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
cpu_relax();
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
}
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif
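Both altera_jtaguart console putc variants spin for FIFO space with the port lock dropped around cpu_relax(), so the wait (which is unbounded when no JTAG host is attached) never stalls other users of the port lock. The shape of the loop, with tx_space_available() standing in as a hypothetical predicate for altera_jtaguart_tx_space():

	uart_port_lock_irqsave(port, &flags);
	while (!tx_space_available(port)) {	/* hypothetical helper */
		uart_port_unlock_irqrestore(port, flags);
		cpu_relax();			/* spin with the lock released */
		uart_port_lock_irqsave(port, &flags);
	}
	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
	uart_port_unlock_irqrestore(port, flags);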
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state == -1)
pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
else
pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
altera_uart_update_ctrl_reg(pp);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void altera_uart_set_termios(struct uart_port *port,
tty_termios_copy_hw(termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/*
* FIXME: port->read_status_mask and port->ignore_status_mask
isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
altera_uart_rx_chars(port);
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
altera_uart_tx_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_RETVAL(isr);
}
}
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Enable RX interrupts now */
pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
altera_uart_update_ctrl_reg(pp);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
struct altera_uart *pp = container_of(port, struct altera_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable all interrupts now */
pp->imr = 0;
altera_uart_update_ctrl_reg(pp);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (port->irq)
free_irq(port->irq, port);
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = readb(port->membase + UART010_IIR);
if (status) {
handled = 1;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_RETVAL(handled);
}
unsigned long flags;
unsigned int lcr_h;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
lcr_h = readb(port->membase + UART010_LCRH);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
writel(lcr_h, port->membase + UART010_LCRH);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int pl010_startup(struct uart_port *port)
if (port->fifosize > 1)
lcr_h |= UART01x_LCRH_FEN;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
writel(lcr_h, port->membase + UART010_LCRH);
writel(old_cr, port->membase + UART010_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
pl010_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
pl010_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
flag = TTY_FRAME;
}
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
if (!sysrq)
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
unsigned long flags;
u16 dmacr;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
if (uap->dmatx.queued)
dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
dmatx->len, DMA_TO_DEVICE);
if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
uart_circ_empty(&uap->port.state->xmit)) {
uap->dmatx.queued = false;
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return;
}
*/
pl011_start_tx_pio(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
/*
* routine to flush out the secondary DMA buffer while
* we immediately trigger the next DMA job.
*/
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
/*
* Rx data can be taken by the UART interrupts during
* the DMA irq handler. So we check the residue here.
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, pending, lastbuf, false);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* Do this check after we picked the DMA chars so we don't
* get some IRQ immediately from RX.
if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
> uap->dmarx.poll_timeout) {
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
pl011_dma_rx_stop(uap);
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
uap->dmarx.running = false;
dmaengine_terminate_all(rxchan);
while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
cpu_relax();
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
pl011_write(uap->dmacr, uap, REG_DMACR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
if (uap->using_tx_dma) {
/* In theory, this should already be done by pl011_dma_flush_buffer */
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pl011_stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void pl011_enable_ms(struct uart_port *port)
{
pl011_fifo_to_tty(uap);
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
tty_flip_buffer_push(&uap->port.state->port);
/*
* If we were temporarily out of DMA mode for a while,
#endif
}
}
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
}
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
status = pl011_read(uap, REG_RIS) & uap->im;
if (status) {
do {
handled = 1;
}
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return IRQ_RETVAL(handled);
}
unsigned long flags;
unsigned int lcr_h;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
lcr_h = pl011_read(uap, REG_LCRH_TX);
if (break_state == -1)
lcr_h |= UART01x_LCRH_BRK;
else
lcr_h &= ~UART01x_LCRH_BRK;
pl011_write(lcr_h, uap, REG_LCRH_TX);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
#ifdef CONFIG_CONSOLE_POLL
unsigned long flags;
unsigned int i;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
static void pl011_unthrottle_rx(struct uart_port *port)
struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
unsigned long flags;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
static int pl011_startup(struct uart_port *port)
pl011_write(uap->vendor->ifls, uap, REG_IFLS);
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
cr = pl011_read(uap, REG_CR);
cr &= UART011_CR_RTS | UART011_CR_DTR;
pl011_write(cr, uap, REG_CR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* initialise the old status of the modem signals
unsigned int cr;
uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
cr = pl011_read(uap, REG_CR);
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
pl011_write(cr, uap, REG_CR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
/*
* disable break condition and fifos
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
- spin_lock_irq(&uap->port.lock);
+ uart_port_lock_irq(&uap->port);
/* mask all interrupts and clear all pending ones */
uap->im = 0;
pl011_write(uap->im, uap, REG_IMSC);
pl011_write(0xffff, uap, REG_ICR);
- spin_unlock_irq(&uap->port.lock);
+ uart_port_unlock_irq(&uap->port);
}
static void pl011_shutdown(struct uart_port *port)
bits = tty_get_frame_size(termios->c_cflag);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Update the per-port timeout.
old_cr |= UART011_CR_RXE;
pl011_write(old_cr, uap, REG_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void
termios->c_cflag &= ~(CMSPAR | CRTSCTS);
termios->c_cflag |= CS8 | CLOCAL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, CS8, uap->fixed_baud);
pl011_setup_status_masks(port, termios);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pl011_type(struct uart_port *port)
clk_enable(uap->clk);
- local_irq_save(flags);
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock(&uap->port.lock);
+ if (uap->port.sysrq || oops_in_progress)
+ locked = uart_port_trylock_irqsave(&uap->port, &flags);
else
- spin_lock(&uap->port.lock);
+ uart_port_lock_irqsave(&uap->port, &flags);
/*
* First save the CR then disable the interrupts
pl011_write(old_cr, uap, REG_CR);
if (locked)
- spin_unlock(&uap->port.lock);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
clk_disable(uap->clk);
}
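Unlike most hunks in this patch, the pl011 console write conversion is not a pure rename: the sysrq case used to skip locking entirely (locked = 0), whereas it now attempts a trylock alongside the oops path, and the explicit local_irq_save()/local_irq_restore() pair folds into the _irqsave lock wrappers. The resulting pattern is:

	if (uap->port.sysrq || oops_in_progress)
		locked = uart_port_trylock_irqsave(&uap->port, &flags);
	else
		uart_port_lock_irqsave(&uap->port, &flags);
	/* ... */
	if (locked)
		uart_port_unlock_irqrestore(&uap->port, flags);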
struct uart_port *port = dev_id;
unsigned int status;
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = UART_GET_STATUS(port);
if (status & UART_STATUS_DR)
if (status & UART_STATUS_THE)
apbuart_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
if (termios->c_cflag & CRTSCTS)
cr |= UART_CTRL_FL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
UART_PUT_SCAL(port, quot);
UART_PUT_CTRL(port, cr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *apbuart_type(struct uart_port *port)
unsigned long flags;
unsigned int rdata;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
}
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
else
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/* disable the UART */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
if ((status & AR933X_UART_CS_HOST_INT) == 0)
return IRQ_NONE;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
status = ar933x_uart_read(up, AR933X_UART_INT_REG);
status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
ar933x_uart_tx_chars(up);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
return IRQ_HANDLED;
}
if (ret)
return ret;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/* Enable HOST interrupts */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
/* Enable RX interrupts */
ar933x_uart_start_rx_interrupt(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* First save the IER then disable the interrupts
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
}
if (status & RXIENB) {
/* already in the ISR, no need for the _irqsave variants */
- spin_lock(&port->lock);
+ uart_port_lock(port);
arc_serial_rx_chars(port, status);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
if ((status & TXIENB) && (status & TXEMPTY)) {
*/
UART_TX_IRQ_DISABLE(port);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!uart_tx_stopped(port))
arc_serial_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
return IRQ_HANDLED;
uartl = hw_val & 0xFF;
uarth = (hw_val >> 8) & 0xFF;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
UART_ALL_IRQ_DISABLE(port);
uart_update_timeout(port, new->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *arc_serial_type(struct uart_port *port)
struct uart_port *port = &arc_uart_ports[co->index].port;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, arc_serial_console_putchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static struct console arc_console = {
struct dma_chan *chan = atmel_port->chan_tx;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (chan)
dmaengine_terminate_all(chan);
atmel_port->tx_done_mask);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void atmel_release_tx_dma(struct uart_port *port)
struct uart_port *port = &atmel_port->uart;
/* The interrupt handler does not take the lock */
- spin_lock(&port->lock);
+ uart_port_lock(port);
atmel_port->schedule_rx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static void atmel_tasklet_tx_func(struct tasklet_struct *t)
struct uart_port *port = &atmel_port->uart;
/* The interrupt handler does not take the lock */
- spin_lock(&port->lock);
+ uart_port_lock(port);
atmel_port->schedule_tx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static void atmel_init_property(struct atmel_uart_port *atmel_port,
} else
mode |= ATMEL_US_PAR_NONE;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
else
atmel_disable_ms(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
atmel_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
atmel_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
unsigned long flags;
unsigned int val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = bcm_uart_readl(port, UART_CTL_REG);
if (ctl)
val &= ~UART_CTL_XMITBRK_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
unsigned int irqstat;
port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
irqstat = bcm_uart_readl(port, UART_IR_REG);
if (irqstat & UART_RX_INT_STAT)
estat & UART_EXTINP_DCD_MASK);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
bcm_uart_writel(port, 0, UART_IR_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
bcm_uart_disable(port);
bcm_uart_flush(port);
unsigned long flags;
int tries;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Drain the hot tub fully before we power it off for the winter. */
for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
uart_update_timeout(port, new->c_cflag, baud);
bcm_uart_enable(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
/* bcm_uart_interrupt() already took the lock */
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
wait_for_xmitr(port);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
if ((termios->c_cflag & CREAD) == 0)
port->read_status_mask &= ~BD_SC_EMPTY;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (IS_SMC(pinfo)) {
unsigned int bits = tty_get_frame_size(termios->c_cflag);
clk_set_rate(pinfo->clk, baud);
else
cpm_setbrg(pinfo->brg - 1, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *cpm_uart_type(struct uart_port *port)
cpm_uart_early_write(pinfo, s, count, true);
local_irq_restore(flags);
} else {
- spin_lock_irqsave(&pinfo->port.lock, flags);
+ uart_port_lock_irqsave(&pinfo->port, &flags);
cpm_uart_early_write(pinfo, s, count, true);
- spin_unlock_irqrestore(&pinfo->port.lock, flags);
+ uart_port_unlock_irqrestore(&pinfo->port, flags);
}
}
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (1) {
u8 status, ch, ch_flag;
ch_flag);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
tty_flip_buffer_push(&port->state->port);
}
if (digicolor_uart_tx_full(port))
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (port->x_char) {
writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
uart_write_wakeup(port);
out:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *digicolor_uart_type(struct uart_port *port)
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, c, n, digicolor_uart_console_putchar);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Wait for transmitter to become empty */
do {
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
- spin_lock(&dport->port.lock);
+ uart_port_lock(&dport->port);
dz_stop_tx(&dport->port);
- spin_unlock(&dport->port.lock);
+ uart_port_unlock(&dport->port);
return;
}
/* Are we done? */
if (uart_circ_empty(xmit)) {
- spin_lock(&dport->port.lock);
+ uart_port_lock(&dport->port);
dz_stop_tx(&dport->port);
- spin_unlock(&dport->port.lock);
+ uart_port_unlock(&dport->port);
}
}
return ret;
}
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
/* Enable interrupts. */
tmp = dz_in(dport, DZ_CSR);
tmp |= DZ_RIE | DZ_TIE;
dz_out(dport, DZ_CSR, tmp);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
return 0;
}
int irq_guard;
u16 tmp;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
dz_stop_tx(&dport->port);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
irq_guard = atomic_add_return(-1, &mux->irq_guard);
if (!irq_guard) {
unsigned long flags;
unsigned short tmp, mask = 1 << dport->port.line;
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
tmp = dz_in(dport, DZ_TCR);
if (break_state)
tmp |= mask;
else
tmp &= ~mask;
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}
static int dz_encode_baud_rate(unsigned int baud)
if (termios->c_cflag & CREAD)
cflag |= DZ_RXENAB;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
uart_update_timeout(uport, termios->c_cflag, baud);
if (termios->c_iflag & IGNBRK)
dport->port.ignore_status_mask |= DZ_BREAK;
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
}
/*
struct dz_port *dport = to_dport(uport);
unsigned long flags;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
if (state < 3)
dz_start_tx(&dport->port);
else
dz_stop_tx(&dport->port);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
}
unsigned short csr, tcr, trdy, mask;
int loops = 10000;
- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
csr = dz_in(dport, DZ_CSR);
dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
tcr = dz_in(dport, DZ_TCR);
mask = tcr;
dz_out(dport, DZ_TCR, mask);
iob();
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
do {
trdy = dz_in(dport, DZ_CSR);
struct circ_buf *xmit = &sport->state->xmit;
unsigned long flags;
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);
if (sport->x_char) {
linflex_put_char(sport, sport->x_char);
linflex_transmit_buffer(sport);
out:
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
return IRQ_HANDLED;
}
unsigned char rx;
bool brk;
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);
status = readl(sport->membase + UARTSR);
while (status & LINFLEXD_UARTSR_RMB) {
}
}
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
tty_flip_buffer_push(port);
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
linflex_setup_watermark(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
DRIVER_NAME, port);
unsigned long ier;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* disable interrupts */
ier = readl(port->membase + LINIER);
ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
writel(ier, port->membase + LINIER);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
devm_free_irq(port->dev, port->irq, port);
}
cr &= ~LINFLEXD_UARTCR_PCE;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = 0;
writel(cr1, port->membase + LINCR1);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *linflex_type(struct uart_port *port)
if (sport->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->lock, flags);
+ locked = uart_port_trylock_irqsave(sport, &flags);
else
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);
linflex_string_write(sport, s, count);
if (locked)
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
}
/*
struct dma_chan *chan = sport->dma_tx_chan;
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!sport->dma_tx_in_progress) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}
uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
sport->dma_tx_in_progress = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
return;
}
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!lpuart_stopped_or_empty(&sport->port))
lpuart_dma_tx(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
sport->port.fifosize = 0;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Disable Rx & Tx */
writeb(0, sport->port.membase + UARTCR2);
/* Enable Rx and Tx */
writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
sport->port.fifosize = 0;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Disable Rx & Tx */
lpuart32_write(&sport->port, 0, UARTCTRL);
/* Enable Rx and Tx */
lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
static void lpuart_txint(struct lpuart_port *sport)
{
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
lpuart_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
}
static void lpuart_rxint(struct lpuart_port *sport)
struct tty_port *port = &sport->port.state->port;
unsigned char rx, sr;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
flg = TTY_NORMAL;
static void lpuart32_txint(struct lpuart_port *sport)
{
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
lpuart32_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
}
static void lpuart32_rxint(struct lpuart_port *sport)
unsigned long rx, sr;
bool is_break;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
flg = TTY_NORMAL;
async_tx_ack(sport->dma_rx_desc);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
if (dmastat == DMA_ERROR) {
dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}
dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
tty_flip_buffer_push(port);
if (!sport->dma_idle_int)
mod_timer(&sport->lpuart_timer,
jiffies + sport->dma_rx_timeout);
- if (spin_trylock_irqsave(&sport->port.lock, flags)) {
+ if (uart_port_trylock_irqsave(&sport->port, &flags)) {
sport->last_residue = state.residue;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
}
{
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
lpuart_setup_watermark_enable(sport);
lpuart_rx_dma_startup(sport);
lpuart_tx_dma_startup(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static int lpuart_startup(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
lpuart32_hw_disable(sport);
lpuart32_setup_watermark_enable(sport);
lpuart32_configure(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static int lpuart32_startup(struct uart_port *port)
unsigned char temp;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* disable Rx/Tx and interrupts */
temp = readb(port->membase + UARTCR2);
UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
writeb(temp, port->membase + UARTCR2);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
lpuart_dma_shutdown(sport);
}
unsigned long temp;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* clear status */
temp = lpuart32_read(&sport->port, UARTSTAT);
UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
lpuart32_write(port, temp, UARTCTRL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
lpuart_dma_shutdown(sport);
}
if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
sport->lpuart_dma_rx_use = false;
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void __lpuart32_serial_setbrg(struct uart_port *port,
if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
sport->lpuart_dma_rx_use = false;
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static const char *lpuart_type(struct uart_port *port)
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* first save CR2 and then disable interrupts */
cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
writeb(old_cr2, sport->port.membase + UARTCR2);
if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* first save CTRL and then disable interrupts */
cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
lpuart32_write(&sport->port, old_cr, UARTCTRL);
if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/*
uart_suspend_port(&lpuart_reg, &sport->port);
if (lpuart_uport_is_active(sport)) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
/* disable Rx/Tx and interrupts */
temp = lpuart32_read(&sport->port, UARTCTRL);
temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
writeb(temp, sport->port.membase + UARTCR2);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
if (sport->lpuart_dma_rx_use) {
/*
lpuart_dma_rx_free(&sport->port);
/* Disable Rx DMA to use UART port as wakeup source */
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
temp = lpuart32_read(&sport->port, UARTBAUD);
lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
writeb(readb(sport->port.membase + UARTCR5) &
~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
if (sport->lpuart_dma_tx_use) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
temp = lpuart32_read(&sport->port, UARTBAUD);
temp &= ~UARTBAUD_TDMAE;
temp &= ~UARTCR5_TDMAS;
writeb(temp, sport->port.membase + UARTCR5);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
sport->dma_tx_in_progress = false;
dmaengine_terminate_sync(sport->dma_tx_chan);
}
char delta_status;
unsigned char status;
- spin_lock(&icom_port->uart_port.lock);
+ uart_port_lock(&icom_port->uart_port);
/* modem input register */
status = readb(&icom_port->dram->isr);
port.delta_msr_wait);
old_status = status;
}
- spin_unlock(&icom_port->uart_port.lock);
+ uart_port_unlock(&icom_port->uart_port);
}
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
struct icom_port *icom_port)
{
- spin_lock(&icom_port->uart_port.lock);
+ uart_port_lock(&icom_port->uart_port);
trace(icom_port, "INTERRUPT", port_int_reg);
if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
if (port_int_reg & INT_RCV_COMPLETED)
recv_interrupt(port_int_reg, icom_port);
- spin_unlock(&icom_port->uart_port.lock);
+ uart_port_unlock(&icom_port->uart_port);
}
static irqreturn_t icom_interrupt(int irq, void *dev_id)
int ret;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT)
ret = TIOCSER_TEMT;
else
ret = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}
/* wait .1 sec to send char */
for (index = 0; index < 10; index++) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
xdata = readb(&icom_port->dram->xchar);
if (xdata == 0x00) {
trace(icom_port, "QUICK_WRITE", 0);
/* flush write operation */
xdata = readb(&icom_port->dram->xchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
break;
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
msleep(10);
}
}
unsigned char cmdReg;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
trace(icom_port, "BREAK", 0);
cmdReg = readb(&icom_port->dram->CmdReg);
if (break_state == -1) {
} else {
writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int icom_open(struct uart_port *port)
unsigned long offset;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
trace(icom_port, "CHANGE_SPEED", 0);
cflag = termios->c_cflag;
trace(icom_port, "XR_ENAB", 0);
writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *icom_type(struct uart_port *port)
unsigned long flags;
u32 ucr1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
imx_uart_writel(sport, ucr4, UCR4);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/* called with port.lock taken and irqs off */
struct imx_port *sport = dev_id;
irqreturn_t ret;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
ret = __imx_uart_rtsint(irq, dev_id);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return ret;
}
{
struct imx_port *sport = dev_id;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return IRQ_HANDLED;
}
struct imx_port *sport = dev_id;
irqreturn_t ret;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
ret = __imx_uart_rxint(irq, dev_id);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return ret;
}
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
ret = IRQ_HANDLED;
}
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return ret;
}
unsigned long flags;
u32 ucr1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
imx_uart_writel(sport, ucr1, UCR1);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/*
unsigned long flags;
if (sport->port.state) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
imx_uart_mctrl_check(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
if (status == DMA_ERROR) {
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_clear_rx_errors(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return;
}
r_bytes = rx_ring->head - rx_ring->tail;
/* If we received something, check for 0xff flood */
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
dma_is_inited = 1;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Reset FIFOs and state machines */
imx_uart_soft_reset(sport);
imx_uart_disable_loopback_rs485(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
sport->dma_is_rxing = 0;
}
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
imx_uart_stop_tx(port);
imx_uart_stop_rx(port);
imx_uart_disable_dma(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
imx_uart_dma_exit(sport);
}
mctrl_gpio_disable_ms(sport->gpios);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
imx_uart_writel(sport, ucr2, UCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
/*
* Stop our timer.
* Disable all interrupts, port and break condition.
*/
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/*
* Read current UCR2 and save it for future use, then clear all the bits
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_uart_enable_ms(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static const char *imx_uart_type(struct uart_port *port)
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/*
* Be careful about the order of enabling bits here. First enable the
imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return 0;
}
if (sport->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/*
* First, save UCR1/2/3 and then disable interrupts
imx_uart_ucrs_restore(sport, &old_ucr);
if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
/*
struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (sport->tx_state == WAIT_AFTER_RTS)
imx_uart_start_tx(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return HRTIMER_NORESTART;
}
struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (sport->tx_state == WAIT_AFTER_SEND)
imx_uart_stop_tx(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return HRTIMER_NORESTART;
}
{
unsigned long flags;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!sport->context_saved) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}
imx_uart_writel(sport, sport->saved_reg[2], UCR3);
imx_uart_writel(sport, sport->saved_reg[3], UCR4);
sport->context_saved = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void imx_uart_save_context(struct imx_port *sport)
unsigned long flags;
/* Save necessary regs */
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
sport->context_saved = true;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
unsigned char r3;
bool push = false;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
r3 = read_zsreg(channel, R3);
/* Channel A */
if (r3 & CHATxIP)
ip22zilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (push)
tty_flip_buffer_push(&up->port.state->port);
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
push = false;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
ZSDELAY();
if (r3 & CHBTxIP)
ip22zilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (push)
tty_flip_buffer_push(&up->port.state->port);
unsigned char status;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = ip22zilog_read_channel_status(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (status & Tx_BUF_EMP)
ret = TIOCSER_TEMT;
else
clear_bits |= SND_BRK;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != up->curregs[R5]) {
write_zsreg(channel, R5, up->curregs[R5]);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
if (ZS_IS_CONS(up))
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__ip22zilog_startup(up);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
if (ZS_IS_CONS(up))
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
channel = ZILOG_CHANNEL_FROM_PORT(port);
up->curregs[R5] &= ~SND_BRK;
ip22zilog_maybe_update_regs(up, channel);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *ip22zilog_type(struct uart_port *port)
struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, count, ip22zilog_put_char);
udelay(2);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init ip22zilog_console_setup(struct console *con, char *options)
printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->curregs[R15] |= BRKIE;
__ip22zilog_startup(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
/* Parse any modem signal changes */
jsm_dbg(INTR, &ch->ch_bd->pci_dev,
"MOD_STAT: sending to parse_modem_sigs\n");
- spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
+ uart_port_lock_irqsave(&ch->uart_port, &lock_flags);
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
- spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
+ uart_port_unlock_irqrestore(&ch->uart_port, lock_flags);
}
}
container_of(port, struct jsm_channel, uart_port);
struct ktermios *termios;
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
termios = &port->state->port.tty->termios;
if (ch == termios->c_cc[VSTART])
channel->ch_bd->bd_ops->send_start_character(channel);
if (ch == termios->c_cc[VSTOP])
channel->ch_bd->bd_ops->send_stop_character(channel);
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
}
static void jsm_tty_stop_rx(struct uart_port *port)
struct jsm_channel *channel =
container_of(port, struct jsm_channel, uart_port);
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
if (break_state == -1)
channel->ch_bd->bd_ops->send_break(channel);
else
channel->ch_bd->bd_ops->clear_break(channel);
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
}
static int jsm_tty_open(struct uart_port *port)
channel->ch_cached_lsr = 0;
channel->ch_stops_sent = 0;
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
termios = &port->state->port.tty->termios;
channel->ch_c_cflag = termios->c_cflag;
channel->ch_c_iflag = termios->c_iflag;
jsm_carrier(channel);
channel->ch_open_count++;
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
return 0;
struct jsm_channel *channel =
container_of(port, struct jsm_channel, uart_port);
- spin_lock_irqsave(&port->lock, lock_flags);
+ uart_port_lock_irqsave(port, &lock_flags);
channel->ch_c_cflag = termios->c_cflag;
channel->ch_c_iflag = termios->c_iflag;
channel->ch_c_oflag = termios->c_oflag;
channel->ch_bd->bd_ops->param(channel);
jsm_carrier(channel);
- spin_unlock_irqrestore(&port->lock, lock_flags);
+ uart_port_unlock_irqrestore(port, lock_flags);
}
static const char *jsm_tty_type(struct uart_port *port)
* if polling, the context would be "in_serving_softirq", so use
* irq[save|restore] spin_lock variants to cover all possibilities
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
if (isr & EV_RX)
liteuart_rx_chars(port);
if (isr & EV_TX)
liteuart_tx_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_RETVAL(isr);
}
}
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* only enabling rx irqs during startup */
liteuart_update_irq_reg(port, true, EV_RX);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (!port->irq) {
timer_setup(&uart->timer, liteuart_timer, 0);
struct liteuart_port *uart = to_liteuart_port(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (port->irq)
free_irq(port->irq, port);
unsigned int baud;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* update baudrate */
baud = uart_get_baud_rate(port, new, old, 0, 460800);
uart_update_timeout(port, new->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *liteuart_type(struct uart_port *port)
uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
port = &uart->port;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, liteuart_putchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int liteuart_console_setup(struct console *co, char *options)
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
wait_for_xmit_empty(&up->port);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
}
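
The lpc32xx_hs hunk above follows the console ->write() locking idiom that recurs in the msm, owl, rda and meson hunks below. A generic sketch of the pattern; example_console_port() and example_putchar() are hypothetical stand-ins for the per-driver port lookup and putchar helpers:

static void example_console_write(struct console *co, const char *s,
				  unsigned int count)
{
	struct uart_port *port = example_console_port(co); /* hypothetical lookup */
	unsigned long flags;
	int locked = 1;

	local_irq_save(flags);
	if (port->sysrq)
		locked = 0;			/* sysrq handling already owns the lock */
	else if (oops_in_progress)
		locked = uart_port_trylock(port); /* never deadlock while oopsing */
	else
		uart_port_lock(port);

	uart_console_write(port, s, count, example_putchar);

	if (locked)
		uart_port_unlock(port);
	local_irq_restore(flags);
}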
struct tty_port *tport = &port->state->port;
u32 status;
- spin_lock(&port->lock);
+ uart_port_lock(port);
/* Read UART status and clear latched interrupts */
status = readl(LPC32XX_HSUART_IIR(port->membase));
__serial_lpc32xx_tx(port);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
if (break_state != 0)
tmp |= LPC32XX_HSU_BREAK;
else
tmp &= ~LPC32XX_HSU_BREAK;
writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* port->lock is not held. */
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__serial_uart_flush(port);
lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
retval = request_irq(port->irq, serial_lpc32xx_interrupt,
0, MODNAME, port);
u32 tmp;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
quot = __serial_get_clock_div(port->uartclk, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Ignore characters? */
tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
if (uart_handle_sysrq_char(&up->port, ch))
continue;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
fsr = serial_in(up, MA35_FSR_REG);
} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
tty_flip_buffer_push(&up->port.state->port);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
}
static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 lcr;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
lcr = serial_in(up, MA35_LCR_REG);
if (break_state != 0)
lcr |= MA35_LCR_BREAK;
else
lcr &= ~MA35_LCR_BREAK;
serial_out(up, MA35_LCR_REG, lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int ma35d1serial_startup(struct uart_port *port)
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
if (termios->c_iflag & INPCK)
serial_out(up, MA35_LCR_REG, lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *ma35d1serial_type(struct uart_port *port)
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
serial_out(up, MA35_IER_REG, ier);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init ma35d1serial_console_setup(struct console *co, char *options)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state == -1)
writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
else
writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/****************************************************************************/
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Reset UART, get it into known state... */
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
pp->imr = MCFUART_UIR_RXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable all interrupts now */
pp->imr = 0;
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/****************************************************************************/
mr2 |= MCFUART_MR2_TXCTS;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
mr2 |= MCFUART_MR2_TXRTS;
port->membase + MCFUART_UCSR);
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
port->membase + MCFUART_UCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/****************************************************************************/
isr = readb(port->membase + MCFUART_UISR) & pp->imr;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (isr & MCFUART_UIR_RXREADY) {
mcf_rx_chars(pp);
ret = IRQ_HANDLED;
mcf_tx_chars(pp);
ret = IRQ_HANDLED;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return ret;
}
if (!irq_id)
goto out;
- spin_lock(&port->lock);
+ uart_port_lock(port);
/* It's safe to write to IIR[7:6] RXC[9:8] */
iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
handled = true;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
out:
return IRQ_RETVAL(handled);
}
baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
static const char *men_z135_type(struct uart_port *port)
free_irq(port->irq, port);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = readl(port->membase + AML_UART_CONTROL);
val &= ~AML_UART_RX_EN;
val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
writel(val, port->membase + AML_UART_CONTROL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void meson_uart_start_tx(struct uart_port *port)
{
struct uart_port *port = (struct uart_port *)dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
meson_receive_chars(port);
meson_uart_start_tx(port);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
u32 val;
int ret = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
unsigned long flags;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
cflags = termios->c_cflag;
iflags = termios->c_iflag;
AML_UART_FRAME_ERR;
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int meson_uart_verify_port(struct uart_port *port,
u32 c;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
c = NO_POLL_CHAR;
else
c = readl(port->membase + AML_UART_RFIFO);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return c;
}
u32 reg;
int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Wait until FIFO is empty or timeout */
ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
out:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_CONSOLE_POLL */
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
writel(val, port->membase + AML_UART_CONTROL);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
{
struct uart_port *port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
mlb_usio_rx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
{
struct uart_port *port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
mlb_usio_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
escr = readb(port->membase + MLB_USIO_REG_ESCR);
if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
escr |= MLB_USIO_ESCR_FLWEN;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
writeb(0, port->membase + MLB_USIO_REG_SCR);
writeb(escr, port->membase + MLB_USIO_REG_ESCR);
writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
else
quot = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
MLB_USIO_SSR_TDRE;
writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *mlb_usio_type(struct uart_port *port)
mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (ctl == -1)
psc_ops->command(port, MPC52xx_PSC_START_BRK);
else
psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int
}
/* Get the lock */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Do our best to flush TX & RX, so we don't lose anything */
/* But we don't wait indefinitely! */
psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
/* We're all set, release the lock */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *
struct uart_port *port = dev_id;
irqreturn_t ret;
- spin_lock(&port->lock);
+ uart_port_lock(port);
ret = psc_ops->handle_irq(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return ret;
}
if (unlikely(!(irqflag & UARTn_INT_RX)))
return IRQ_NONE;
- spin_lock(&port->lock);
+ uart_port_lock(port);
mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
mps2_uart_rx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
if (unlikely(!(irqflag & UARTn_INT_TX)))
return IRQ_NONE;
- spin_lock(&port->lock);
+ uart_port_lock(port);
mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
mps2_uart_tx_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
struct uart_port *port = data;
u8 irqflag = mps2_uart_read8(port, UARTn_INT);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (irqflag & UARTn_INT_RX_OVERRUN) {
struct tty_port *tport = &port->state->port;
handled = IRQ_HANDLED;
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return handled;
}
bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
unsigned int count;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Already stopped */
if (!dma->count)
msm_handle_tx(port);
done:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Already stopped */
if (!dma->count)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (!sysrq)
tty_insert_flip_char(tport, dma->virt[i], flag);
}
msm_start_rx_dma(msm_port);
done:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (count)
tty_flip_buffer_push(tport);
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
sysrq = uart_handle_sysrq_char(port, buf[i]);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!sysrq)
tty_insert_flip_char(tport, buf[i], flag);
}
else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
sysrq = uart_handle_sysrq_char(port, c);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!sysrq)
tty_insert_flip_char(tport, c, flag);
}
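
The msm RX paths above (and the pmac_zilog hunk further down) deliberately drop the port lock around uart_handle_sysrq_char(): a sysrq handler may print to this very console and would otherwise recurse on the same lock. The shape of the pattern, with names mirroring the surrounding hunks:

	/* RX loop body, entered with the port lock held */
	uart_port_unlock(port);
	sysrq = uart_handle_sysrq_char(port, c);	/* may emit console output */
	uart_port_lock(port);
	if (!sysrq)
		tty_insert_flip_char(tport, c, flag);	/* queue only non-sysrq chars */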
unsigned int misr;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
misr = msm_read(port, MSM_UART_MISR);
msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
unsigned long flags, rate;
flags = *saved_flags;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
entry = msm_find_best_baud(port, baud, &rate);
clk_set_rate(msm_port->clk, rate);
baud = rate / 16 / entry->divisor;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
*saved_flags = flags;
port->uartclk = rate;
unsigned long flags;
unsigned int baud, mr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (dma->chan) /* Terminate if any */
msm_stop_dma(port, dma);
/* Try to use DMA */
msm_start_rx_dma(msm_port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *msm_type(struct uart_port *port)
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
else
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (is_uartdm)
msm_reset_dm_count(port, count);
}
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
unsigned long flags;
unsigned int st;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
st = readl(port->membase + UART_STAT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}
unsigned int ctl;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctl = readl(port->membase + UART_CTRL(port));
if (brk == -1)
ctl |= CTRL_SND_BRK_SEQ;
else
ctl &= ~CTRL_SND_BRK_SEQ;
writel(ctl, port->membase + UART_CTRL(port));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
unsigned long flags;
unsigned int baud, min_baud, max_baud;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
uart_update_timeout(port, termios->c_cflag, baud);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *mvebu_uart_type(struct uart_port *port)
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
intr = readl(port->membase + UART_INTR(port)) &
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int mvebu_uart_console_setup(struct console *co, char *options)
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void serial_omap_unthrottle(struct uart_port *port)
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static unsigned int check_modem_status(struct uart_omap_port *up)
irqreturn_t ret = IRQ_NONE;
int max_count = 256;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
do {
iir = serial_in(up, UART_IIR);
}
} while (max_count--);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
tty_flip_buffer_push(&up->port.state->port);
unsigned int ret = 0;
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return ret;
}
unsigned long flags;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int serial_omap_startup(struct uart_port *port)
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Most PC uarts need OUT2 raised to enable interrupts.
*/
up->port.mctrl |= TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
up->msr_saved_flags = 0;
/*
up->ier = 0;
serial_out(up, UART_IER, 0);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Disable break condition and FIFOs
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Update the per-port timeout.
serial_omap_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock(&up->port.lock);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
check_modem_status(up);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
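
The omap console hunk above is more than a mechanical swap: it folds the bare local_irq_save() into the irqsave lock wrappers, and the sysrq case now opportunistically trylocks instead of writing with no lock at all. Side by side, schematically (a sketch; on PREEMPT_RT the port lock is a sleeping lock, so interrupt disabling must stay coupled to the lock operation rather than wrapped around it):

	/* Before: interrupts disabled up front, lock (maybe) taken second */
	local_irq_save(flags);
	if (up->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&up->port.lock);
	else
		spin_lock(&up->port.lock);

	/* After: one combined, RT-compatible operation */
	if (up->port.sysrq || oops_in_progress)
		locked = uart_port_trylock_irqsave(&up->port, &flags);
	else
		uart_port_lock_irqsave(&up->port, &flags);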
static int __init
u32 val;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = owl_uart_read(port, OWL_UART_STAT);
ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}
unsigned long flags;
u32 stat;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stat = owl_uart_read(port, OWL_UART_STAT);
stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
owl_uart_write(port, stat, OWL_UART_STAT);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
u32 val;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = owl_uart_read(port, OWL_UART_CTL);
val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
| OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
owl_uart_write(port, val, OWL_UART_CTL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
if (ret)
return ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = owl_uart_read(port, OWL_UART_STAT);
val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
val |= OWL_UART_CTL_EN;
owl_uart_write(port, val, OWL_UART_CTL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
u32 ctl;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctl = owl_uart_read(port, OWL_UART_CTL);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void owl_uart_release_port(struct uart_port *port)
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
owl_uart_write(port, old_ctl, OWL_UART_CTL);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
spin_lock_irqsave(&priv->lock, flags);
- spin_lock(&port->lock);
+ uart_port_lock(port);
uart_update_timeout(port, termios->c_cflag, baud);
rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
tty_termios_encode_baud_rate(termios, baud, baud);
out:
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
spin_unlock_irqrestore(&priv->lock, flags);
}
port_locked = 0;
} else if (oops_in_progress) {
priv_locked = spin_trylock(&priv->lock);
- port_locked = spin_trylock(&priv->port.lock);
+ port_locked = uart_port_trylock(&priv->port);
} else {
spin_lock(&priv->lock);
- spin_lock(&priv->port.lock);
+ uart_port_lock(&priv->port);
}
/*
iowrite8(ier, priv->membase + UART_IER);
if (port_locked)
- spin_unlock(&priv->port.lock);
+ uart_port_unlock(&priv->port);
if (priv_locked)
spin_unlock(&priv->lock);
local_irq_restore(flags);
struct pic32_sport *sport = to_pic32_sport(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (ctl)
pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
PIC32_UART_STA_UTXBRK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* get port type in string format */
*/
max_count = PIC32_UART_RX_FIFO_DEPTH;
- spin_lock(&port->lock);
+ uart_port_lock(port);
tty = &port->state->port;
} while (--max_count);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
tty_flip_buffer_push(tty);
}
struct uart_port *port = dev_id;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pic32_uart_do_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
unsigned long flags;
/* disable uart */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pic32_uart_dsbl_and_mask(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
clk_disable_unprepare(sport->clk);
/* free all 3 interrupts for this UART */
unsigned int quot;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* disable uart and mask all interrupts while changing speed */
pic32_uart_dsbl_and_mask(port);
/* enable uart */
pic32_uart_en_and_unmask(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* serial core request to claim uart iomem */
#endif /* USE_CTRL_O_SYSRQ */
if (uap->port.sysrq) {
int swallow;
- spin_unlock(&uap->port.lock);
+ uart_port_unlock(&uap->port);
swallow = uart_handle_sysrq_char(&uap->port, ch);
- spin_lock(&uap->port.lock);
+ uart_port_lock(&uap->port);
if (swallow)
goto next_char;
}
uap_a = pmz_get_port_A(uap);
uap_b = uap_a->mate;
- spin_lock(&uap_a->port.lock);
+ uart_port_lock(&uap_a->port);
r3 = read_zsreg(uap_a, R3);
/* Channel A */
rc = IRQ_HANDLED;
}
skip_a:
- spin_unlock(&uap_a->port.lock);
+ uart_port_unlock(&uap_a->port);
if (push)
tty_flip_buffer_push(&uap->port.state->port);
if (!uap_b)
goto out;
- spin_lock(&uap_b->port.lock);
+ uart_port_lock(&uap_b->port);
push = false;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
if (!ZS_IS_OPEN(uap_b)) {
rc = IRQ_HANDLED;
}
skip_b:
- spin_unlock(&uap_b->port.lock);
+ uart_port_unlock(&uap_b->port);
if (push)
tty_flip_buffer_push(&uap->port.state->port);
unsigned long flags;
u8 status;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
status = read_zsreg(uap, R0);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
return status;
}
else
clear_bits |= SND_BRK;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != uap->curregs[R5]) {
write_zsreg(uap, R5, uap->curregs[R5]);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#ifdef CONFIG_PPC_PMAC
{
unsigned long flags;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
uap->curregs[R5] |= DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
msleep(110);
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
uap->curregs[R5] &= ~DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
msleep(10);
}
* initialize the chip
*/
if (!ZS_IS_CONS(uap)) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pwr_delay = __pmz_startup(uap);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
pmz_irda_reset(uap);
/* Enable interrupt requests for the channel */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
pmz_interrupt_control(uap, 1);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable interrupt requests for the channel */
pmz_interrupt_control(uap, 0);
pmz_maybe_update_regs(uap);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Release interrupt handler */
free_irq(uap->port.irq, uap);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
if (!ZS_IS_CONS(uap))
pmz_set_scc_power(uap, 0); /* Shut the chip down */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable IRQs on the port */
pmz_interrupt_control(uap, 0);
if (ZS_IS_OPEN(uap))
pmz_interrupt_control(uap, 1);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pmz_type(struct uart_port *port)
struct uart_pmac_port *uap = &pmz_ports[con->index];
unsigned long flags;
- spin_lock_irqsave(&uap->port.lock, flags);
+ uart_port_lock_irqsave(&uap->port, &flags);
/* Turn off interrupts and enable the transmitter. */
write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
write_zsreg(uap, R1, uap->curregs[1]);
/* Don't disable the transmitter. */
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
}
/*
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
return IRQ_NONE;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
return IRQ_HANDLED;
}
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return ret;
}
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int serial_pxa_startup(struct uart_port *port)
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl |= TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Finally, enable interrupts. Note: Modem status interrupts
up->ier = 0;
serial_out(up, UART_IER, 0);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Disable break condition and FIFOs
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Ensure the port will be enabled.
up->lcr = cval; /* Save LCR */
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, UART_FCR, fcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* First save the IER then disable the interrupts
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
local_irq_restore(flags);
clk_disable(up->clk);
uport = &port->uport;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&uport->lock, flags);
+ locked = uart_port_trylock_irqsave(uport, &flags);
else
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
geni_status = readl(uport->membase + SE_GENI_STATUS);
qcom_geni_serial_setup_tx(uport, port->tx_remaining);
if (locked)
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
if (uport->suspended)
return IRQ_NONE;
- spin_lock(&uport->lock);
+ uart_port_lock(uport);
m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
unsigned int ret;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = rda_uart_read(port, RDA_UART_STATUS);
ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}
unsigned int baud;
u32 irq_mask;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
rda_uart_change_baudrate(rda_port, baud);
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rda_uart_send_chars(struct uart_port *port)
unsigned long flags;
u32 val, irq_mask;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Clear IRQ cause */
val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
rda_uart_send_chars(port);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
int ret;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
"rda-uart", port);
if (ret)
return ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
val = rda_uart_read(port, RDA_UART_CTRL);
val |= RDA_UART_ENABLE;
val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
rda_uart_write(port, val, RDA_UART_IRQ_MASK);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
unsigned long flags;
u32 val;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rda_uart_stop_tx(port);
rda_uart_stop_rx(port);
val &= ~RDA_UART_ENABLE;
rda_uart_write(port, val, RDA_UART_CTRL);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *rda_uart_type(struct uart_port *port)
rda_uart_request_port(port);
}
- spin_lock_irqsave(&port->lock, irq_flags);
+ uart_port_lock_irqsave(port, &irq_flags);
/* Clear mask, so no surprise interrupts. */
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
/* Clear status register */
rda_uart_write(port, 0, RDA_UART_STATUS);
- spin_unlock_irqrestore(&port->lock, irq_flags);
+ uart_port_unlock_irqrestore(port, irq_flags);
}
static void rda_uart_release_port(struct uart_port *port)
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}
rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}
* But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
* enabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
}
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
break_state ? RP2_TXRX_CTL_BREAK_m : 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rp2_uart_enable_ms(struct uart_port *port)
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* ignore all characters if CREAD is not set */
port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
__rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
uart_update_timeout(port, new->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rp2_rx_chars(struct rp2_uart_port *up)
{
u32 status;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
/*
* The IRQ status bits are clear-on-write. Other status bits in
if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
}
static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
rp2_uart_break_ctl(port, 0);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
rp2_mask_ch_irq(up, up->idx, 0);
rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *rp2_uart_type(struct uart_port *port)
unsigned long flags;
if (sport->port.state) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sa1100_mctrl_check(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
struct sa1100_port *sport = dev_id;
unsigned int status, pass_counter = 0;
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
status = UART_GET_UTSR0(sport);
status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
do {
status &= SM_TO_UTSR0(sport->port.read_status_mask) |
~UTSR0_TFS;
} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return IRQ_HANDLED;
}
unsigned long flags;
unsigned int utcr3;
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
utcr3 = UART_GET_UTCR3(sport);
if (break_state == -1)
utcr3 |= UTCR3_BRK;
else
utcr3 &= ~UTCR3_BRK;
UART_PUT_UTCR3(sport, utcr3);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static int sa1100_startup(struct uart_port *port)
/*
* Enable modem status interrupts
*/
- spin_lock_irq(&sport->port.lock);
+ uart_port_lock_irq(&sport->port);
sa1100_enable_ms(&sport->port);
- spin_unlock_irq(&sport->port.lock);
+ uart_port_unlock_irq(&sport->port);
return 0;
}
del_timer_sync(&sport->timer);
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
sa1100_enable_ms(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
static const char *sa1100_type(struct uart_port *port)
unsigned int ucon, ufcon;
int count = 10000;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (--count && !s3c24xx_serial_txempty_nofifo(port))
udelay(100);
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_enabled = 1;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void s3c24xx_serial_rx_disable(struct uart_port *port)
unsigned long flags;
unsigned int ucon;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
ourport->rx_enabled = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void s3c24xx_serial_stop_tx(struct uart_port *port)
dma->tx_transfer_addr, dma->tx_size,
DMA_TO_DEVICE);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_xmit_advance(port, count);
ourport->tx_in_progress = 0;
uart_write_wakeup(port);
s3c24xx_serial_start_next_tx(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
received = dma->rx_bytes_requested - state.residue;
async_tx_ack(dma->rx_desc);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (received)
s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
s3c64xx_start_rx_dma(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
utrstat = rd_regl(port, S3C2410_UTRSTAT);
rd_regl(port, S3C2410_UFSTAT);
- spin_lock(&port->lock);
+ uart_port_lock(port);
if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
s3c64xx_start_rx_dma(ourport);
wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
finish:
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
- spin_lock(&port->lock);
+ uart_port_lock(port);
s3c24xx_serial_rx_drain_fifo(ourport);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
- spin_lock(&port->lock);
+ uart_port_lock(port);
s3c24xx_serial_tx_chars(ourport);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
unsigned long flags;
unsigned int ucon;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ucon = rd_regl(port, S3C2410_UCON);
wr_regl(port, S3C2410_UCON, ucon);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
ourport->rx_enabled = 1;
ourport->tx_enabled = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
enable_rx_pio(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Enable Rx Interrupt */
s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
ourport->rx_enabled = 1;
ourport->tx_enabled = 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
enable_rx_pio(ourport);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Enable Rx Interrupt */
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
ulcon |= S3C2410_LCON_PNONE;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
dev_dbg(port->dev,
"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= RXSTAT_DUMMY_READ;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *s3c24xx_serial_type(struct uart_port *port)
if (cons_uart->sysrq)
locked = false;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&cons_uart->lock, flags);
+ locked = uart_port_trylock_irqsave(cons_uart, &flags);
else
- spin_lock_irqsave(&cons_uart->lock, flags);
+ uart_port_lock_irqsave(cons_uart, &flags);
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
if (locked)
- spin_unlock_irqrestore(&cons_uart->lock, flags);
+ uart_port_unlock_irqrestore(cons_uart, flags);
}
/* Shouldn't be __init, as it can be instantiated from another module */
else
aux &= ~M_DUART_CTS_CHNG_ENA;
- spin_lock(&uport->lock);
+ uart_port_lock(uport);
if (sport->tx_stopped)
command |= M_DUART_TX_DIS;
write_sbdchn(sport, R_DUART_CMD, command);
- spin_unlock(&uport->lock);
+ uart_port_unlock(uport);
}
unsigned int mask;
/* Disable transmit interrupts and enable the transmitter. */
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
mask & ~M_DUART_IMR_TX);
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
uart_console_write(&sport->port, s, count, sbd_console_putchar);
/* Restore transmit interrupts and the transmitter enable. */
- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
sbd_line_drain(sport);
if (sport->tx_stopped)
write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}
static int __init sbd_console_setup(struct console *co, char *options)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ unsigned long flags;
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0))
mutex_lock(&one->efr_lock);
sc16is7xx_handle_tx(port);
mutex_unlock(&one->efr_lock);
+
+ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+ uart_port_unlock_irqrestore(port, flags);
}
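
The sc16is7xx hunk adds behavior rather than only swapping lock calls: once the TX work has pushed what it can, the THR-empty interrupt is re-armed under the port lock so the next FIFO drain retriggers TX. sc16is7xx_ier_set() is the driver's helper for cached IER updates; its body lives elsewhere in the driver and is not shown in this patch. A sketch of the intent:

	/* Tail of the tx work item: re-enable the THR-empty interrupt.
	 * The port lock serializes this read-modify-write of IER with
	 * the IRQ thread and termios paths that also touch IER.
	 */
	uart_port_lock_irqsave(port, &flags);
	sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
	uart_port_unlock_irqrestore(port, flags);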
static void sc16is7xx_reconf_rs485(struct uart_port *port)
divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
}
- spin_lock_irqsave(&tup->uport.lock, flags);
+ uart_port_lock_irqsave(&tup->uport, &flags);
lcr = tup->lcr_shadow;
lcr |= UART_LCR_DLAB;
tegra_uart_write(tup, lcr, UART_LCR);
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
- spin_unlock_irqrestore(&tup->uport.lock, flags);
+ uart_port_unlock_irqrestore(&tup->uport, flags);
tup->current_baud = baud;
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- spin_lock_irqsave(&tup->uport.lock, flags);
+ uart_port_lock_irqsave(&tup->uport, &flags);
uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
tegra_uart_start_next_tx(tup);
- spin_unlock_irqrestore(&tup->uport.lock, flags);
+ uart_port_unlock_irqrestore(&tup->uport, flags);
}
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
unsigned int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
if (!tup->tx_in_progress) {
unsigned long lsr = tegra_uart_read(tup, UART_LSR);
if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
ret = TIOCSER_TEMT;
}
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
return ret;
}
struct dma_tx_state state;
enum dma_status status;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
set_rts(tup, true);
done:
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
}
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
bool is_rx_int = false;
unsigned long flags;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
while (1) {
iir = tegra_uart_read(tup, UART_IIR);
if (iir & UART_IIR_NO_INT) {
} else if (is_rx_start) {
tegra_uart_start_rx_dma(tup);
}
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
return IRQ_HANDLED;
}
}
}
- spin_lock_irqsave(&tup->uport.lock, flags);
+ uart_port_lock_irqsave(&tup->uport, &flags);
/* Reset the Rx and Tx FIFOs */
tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
tup->current_baud = 0;
- spin_unlock_irqrestore(&tup->uport.lock, flags);
+ uart_port_unlock_irqrestore(&tup->uport, flags);
tup->rx_in_progress = 0;
tup->tx_in_progress = 0;
int ret;
max_divider *= 16;
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
/* The configuration is changing, so it is safe to stop any rx now */
if (tup->rts_active)
baud = uart_get_baud_rate(u, termios, oldtermios,
parent_clk_rate/max_divider,
parent_clk_rate/16);
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
ret = tegra_set_baudrate(tup, baud);
if (ret < 0) {
dev_err(tup->uport.dev, "Failed to set baud rate\n");
}
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
- spin_lock_irqsave(&u->lock, flags);
+ uart_port_lock_irqsave(u, &flags);
/* Flow control */
if (termios->c_cflag & CRTSCTS) {
if (termios->c_iflag & IGNBRK)
tup->uport.ignore_status_mask |= UART_LSR_BI;
- spin_unlock_irqrestore(&u->lock, flags);
+ uart_port_unlock_irqrestore(u, flags);
}
static const char *tegra_uart_type(struct uart_port *u)
({ \
struct uart_port *__uport = uart_port_ref(state); \
if (__uport) \
- spin_lock_irqsave(&__uport->lock, flags); \
+ uart_port_lock_irqsave(__uport, &flags); \
__uport; \
})
({ \
struct uart_port *__uport = uport; \
if (__uport) { \
- spin_unlock_irqrestore(&__uport->lock, flags); \
+ uart_port_unlock_irqrestore(__uport, flags); \
uart_port_deref(__uport); \
} \
})
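
These serial_core.c statement-expression macros bundle a uart_port_ref() with the lock wrappers so callers cannot race a port removal. A usage sketch; upstream names these uart_port_ref_lock()/uart_port_unlock_deref() after the rename forced by the new global uart_port_lock(), but the names are assumed here since this excerpt only shows the macro bodies:

	unsigned long flags;
	struct uart_port *uport;

	/* Take a reference and the lock in one step; NULL means the
	 * port went away (e.g. hot-unplug) and nothing was locked.
	 */
	uport = uart_port_ref_lock(state, flags);
	if (uport) {
		/* ... operate on uport with the lock held ... */
	}
	/* Unlock and drop the reference; safe to call with NULL. */
	uart_port_unlock_deref(uport, flags);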
unsigned long flags;
unsigned int old;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
/*
* Set modem status enables based on termios cflag
*/
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
if (termios->c_cflag & CRTSCTS)
uport->status |= UPSTAT_CTS_ENABLE;
else
else
__uart_start(state);
}
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
/*
if (port->ops->send_xchar)
port->ops->send_xchar(port, ch);
else {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->x_char = ch;
if (ch)
port->ops->start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
uart_port_deref(port);
}
if (!tty_io_error(tty)) {
result = uport->mctrl;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
result |= uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
out:
mutex_unlock(&port->mutex);
uport = uart_port_ref(state);
if (!uport)
return -EIO;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
uart_enable_ms(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
add_wait_queue(&port->delta_msr_wait, &wait);
for (;;) {
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
set_current_state(TASK_INTERRUPTIBLE);
uport = uart_port_ref(state);
if (!uport)
return -EIO;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
uart_port_deref(uport);
icount->cts = cnow.cts;
uart_set_rs485_termination(port, rs485);
uart_set_rs485_rx_during_tx(port, rs485);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = port->rs485_config(port, NULL, rs485);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (ret) {
memset(rs485, 0, sizeof(*rs485));
/* unset GPIOs */
unsigned long flags;
struct serial_rs485 aux;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
aux = port->rs485;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (copy_to_user(rs485, &aux, sizeof(aux)))
return -EFAULT;
uart_set_rs485_termination(port, &rs485);
uart_set_rs485_rx_during_tx(port, &rs485);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret) {
port->rs485 = rs485;
if (!(rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (ret) {
/* restore old GPIO settings */
gpiod_set_value_cansleep(port->rs485_term_gpio,
if (!port->iso7816_config)
return -ENOTTY;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
aux = port->iso7816;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (copy_to_user(iso7816, &aux, sizeof(aux)))
return -EFAULT;
if (iso7816.reserved[i])
return -EINVAL;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = port->iso7816_config(port, &iso7816);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (ret)
return ret;
if (WARN(!uport, "detached port still initialized!\n"))
return;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uport->ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
uart_port_shutdown(port);
/*
* Free the transmit buffer.
*/
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
buf = state->xmit.buf;
state->xmit.buf = NULL;
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
free_page((unsigned long)buf);
*/
if (WARN_ON(!uport))
return true;
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uart_enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
uart_port_deref(uport);
return mctrl & TIOCM_CAR;
pm_state = state->pm_state;
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, UART_PM_STATE_ON);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
status = uport->ops->get_mctrl(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, pm_state);
*/
if (!console_suspend_enabled && uart_console(uport)) {
if (uport->ops->start_rx) {
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uport->ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
goto unlock;
}
tty_port_set_suspended(port, true);
tty_port_set_initialized(port, false);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
ops->stop_tx(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, 0);
mctrl = uport->mctrl;
uport->mctrl = 0;
ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
/*
* Wait for the transmitter to empty.
uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
if (!console_suspend_enabled && uport->ops->start_rx) {
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
uport->ops->start_rx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
}
if (console_suspend_enabled)
console_start(uport->cons);
int ret;
uart_change_pm(state, UART_PM_STATE_ON);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, 0);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
struct tty_struct *tty = port->tty;
if (tty)
uart_change_line_settings(tty, state, NULL);
uart_rs485_config(uport);
- spin_lock_irq(&uport->lock);
+ uart_port_lock_irq(uport);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, uport->mctrl);
ops->start_tx(uport);
- spin_unlock_irq(&uport->lock);
+ uart_port_unlock_irq(uport);
tty_port_set_initialized(port, true);
} else {
/*
* keep the DTR setting that is set in uart_set_options()
* We probably don't need a spinlock around this, but
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->mctrl &= TIOCM_DTR;
if (!(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
uart_rs485_config(port);
mctrl_gpio_get(gpios, &mctrl);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
mctrl_diff = mctrl ^ gpios->mctrl_prev;
gpios->mctrl_prev = mctrl;
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
goto out;
/* Flush any pending TX for the port */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (__serial_port_busy(port))
port->ops->start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
out:
pm_runtime_mark_last_busy(dev);
unsigned int status;
while (1) {
- spin_lock(&up->lock);
+ uart_port_lock(up);
status = sio_in(up, TXX9_SIDISR);
if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
status &= ~TXX9_SIDISR_TDIS;
if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT))) {
- spin_unlock(&up->lock);
+ uart_port_unlock(up);
break;
}
sio_mask(up, TXX9_SIDISR,
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
TXX9_SIDISR_TOUT);
- spin_unlock(&up->lock);
+ uart_port_unlock(up);
if (pass_counter++ > PASS_LIMIT)
break;
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
return ret;
}
{
unsigned long flags;
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
if (break_state == -1)
sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
else
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
}
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* Now, initialize the UART
*/
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
serial_txx9_set_mctrl(up, up->mctrl);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
/* Enable RX/TX */
sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
*/
sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
serial_txx9_set_mctrl(up, up->mctrl);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
/*
* Disable break condition
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->lock, flags);
+ uart_port_lock_irqsave(up, &flags);
/*
* Update the per-port timeout.
sio_out(up, TXX9_SIFCR, fcr);
serial_txx9_set_mctrl(up, up->mctrl);
- spin_unlock_irqrestore(&up->lock, flags);
+ uart_port_unlock_irqrestore(up, flags);
}
static void
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_xmit_advance(port, s->tx_dma_len);
}
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Locking: called with port lock held */
dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
s->active_rx);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
active = sci_dma_rx_find_active(s);
if (active >= 0)
dma_async_issue_pending(chan);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
__func__, s->cookie_rx[active], active, s->active_rx);
return;
fail:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
/* Switch to PIO */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
dmaengine_terminate_async(chan);
sci_dma_rx_chan_invalidate(s);
sci_dma_rx_reenable_irq(s);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void sci_dma_tx_release(struct sci_port *s)
fail:
/* Switch to PIO */
if (!port_lock_held)
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (i)
dmaengine_terminate_async(chan);
sci_dma_rx_chan_invalidate(s);
sci_start_rx(port);
if (!port_lock_held)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return -EAGAIN;
}
* transmit till the end, and then the rest. Take the port lock to get a
* consistent xmit buffer state.
*/
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
head = xmit->head;
tail = xmit->tail;
buf = s->tx_dma_addr + tail;
s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
if (!s->tx_dma_len) {
/* Transmit buffer has been flushed */
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
return;
}
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
goto switch_to_pio;
}
desc->callback_param = s;
s->cookie_tx = dmaengine_submit(desc);
if (dma_submit_error(s->cookie_tx)) {
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
goto switch_to_pio;
}
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
__func__, xmit->buf, tail, head, s->cookie_tx);
return;
switch_to_pio:
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
s->chan_tx = NULL;
sci_start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
dev_dbg(port->dev, "DMA Rx timed out\n");
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
active = sci_dma_rx_find_active(s);
if (active < 0) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return HRTIMER_NORESTART;
}
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
if (status == DMA_COMPLETE) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
s->active_rx, active);
*/
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
if (status == DMA_COMPLETE) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
return HRTIMER_NORESTART;
}
sci_dma_rx_reenable_irq(s);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return HRTIMER_NORESTART;
}
struct uart_port *port = ptr;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sci_transmit_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
if (port->type != PORT_SCI)
return sci_tx_interrupt(irq, ptr);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctrl = serial_port_in(port, SCSCR);
ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
serial_port_out(port, SCSCR, ctrl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
}
return;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
scsptr = serial_port_in(port, SCSPTR);
scscr = serial_port_in(port, SCSCR);
serial_port_out(port, SCSPTR, scsptr);
serial_port_out(port, SCSCR, scscr);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sci_startup(struct uart_port *port)
s->autorts = false;
mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sci_stop_rx(port);
sci_stop_tx(port);
/*
scr = serial_port_in(port, SCSCR);
serial_port_out(port, SCSCR, scr &
(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (s->chan_rx_saved) {
serial_port_out(port, SCCKS, sccks);
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
sci_reset(port);
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
sci_port_disable(s);
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* first save SCSCR then disable interrupts, keep clock source */
ctrl = serial_port_in(port, SCSCR);
serial_port_out(port, SCSCR, ctrl);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int serial_console_setup(struct console *co, char *options)
struct sifive_serial_port *ssp = dev_id;
u32 ip;
- spin_lock(&ssp->port.lock);
+ uart_port_lock(&ssp->port);
ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
if (!ip) {
- spin_unlock(&ssp->port.lock);
+ uart_port_unlock(&ssp->port);
return IRQ_NONE;
}
if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
__ssp_transmit_chars(ssp);
- spin_unlock(&ssp->port.lock);
+ uart_port_unlock(&ssp->port);
return IRQ_HANDLED;
}
ssp->port.uartclk / 16);
__ssp_update_baud_rate(ssp, rate);
- spin_lock_irqsave(&ssp->port.lock, flags);
+ uart_port_lock_irqsave(&ssp->port, &flags);
/* Update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, rate);
if (v != old_v)
__ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
- spin_unlock_irqrestore(&ssp->port.lock, flags);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
}
static void sifive_serial_release_port(struct uart_port *port)
if (ssp->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&ssp->port.lock);
+ locked = uart_port_trylock(&ssp->port);
else
- spin_lock(&ssp->port.lock);
+ uart_port_lock(&ssp->port);
ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
if (locked)
- spin_unlock(&ssp->port.lock);
+ uart_port_unlock(&ssp->port);
local_irq_restore(flags);
}
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
sp->tx_dma.trans_len, DMA_TO_DEVICE);
sprd_tx_dma_config(port))
sp->tx_dma.trans_len = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sprd_uart_dma_submit(struct uart_port *port,
enum dma_status status;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = dmaengine_tx_status(sp->rx_dma.chn,
sp->rx_dma.cookie, &state);
if (status != DMA_COMPLETE) {
sprd_stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
if (sprd_start_dma_rx(port))
sprd_stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sprd_start_dma_rx(struct uart_port *port)
struct uart_port *port = dev_id;
unsigned int ims;
- spin_lock(&port->lock);
+ uart_port_lock(port);
ims = serial_in(port, SPRD_IMSR);
if (!ims) {
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_NONE;
}
if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
sprd_tx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
serial_out(port, SPRD_CTL1, fc);
/* enable interrupt */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ien = serial_in(port, SPRD_IEN);
ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
if (!sp->rx_dma.enable)
ien |= SPRD_IEN_RX_FULL;
serial_out(port, SPRD_IEN, ien);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
lcr |= SPRD_LCR_EVEN_PAR;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
serial_out(port, SPRD_CTL1, fc);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, sprd_console_putchar);
wait_for_xmitr(port);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int sprd_console_setup(struct console *co, char *options)
struct uart_port *port = ptr;
u32 status;
- spin_lock(&port->lock);
+ uart_port_lock(port);
status = asc_in(port, ASC_STA);
asc_transmit_chars(port);
}
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
* we can come to turning it off. Note this is not called with
* the port spinlock held.
*/
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
asc_out(port, ASC_CTL, ctl);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
clk_disable_unprepare(ascport->clk);
break;
}
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
cflag = termios->c_cflag;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* read control register */
ctrl_val = asc_in(port, ASC_CTL);
/* write final value and enable port */
asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *asc_type(struct uart_port *port)
if (port->sysrq)
locked = 0; /* asc_interrupt has already claimed the lock */
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Disable interrupts so we don't get the IRQ line bouncing
asc_out(port, ASC_INTEN, intenable);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int asc_console_setup(struct console *co, char *options)
unsigned int size;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
stm32_usart_tx_dma_terminate(stm32port);
/* Let's see if we have pending data to send */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stm32_usart_transmit_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
if (!stm32_port->throttled) {
if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
- spin_lock(&port->lock);
+ uart_port_lock(port);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
if (size)
}
if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
- spin_lock(&port->lock);
+ uart_port_lock(port);
stm32_usart_transmit_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
/* Receiver timeout irq for DMA RX */
if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
- spin_lock(&port->lock);
+ uart_port_lock(port);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
if (size)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/*
* Pause DMA transfer, so the RX data gets queued into the FIFO.
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
stm32_port->throttled = true;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Unthrottle the remote, the input buffer can now accept data. */
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
if (stm32_port->rx_ch)
stm32_usart_rx_dma_start_or_resume(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Receive stop */
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
isr,
writel_relaxed(cr1, port->membase + ofs->cr1);
stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
/* Handle modem control interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
pm_runtime_get_sync(port->dev);
break;
case UART_PM_STATE_OFF:
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
pm_runtime_put_sync(port->dev);
break;
}
int locked = 1;
if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Save and disable interrupts, enable the transmitter */
old_cr1 = readl_relaxed(port->membase + ofs->cr1);
writel_relaxed(old_cr1, port->membase + ofs->cr1);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int stm32_usart_console_setup(struct console *co, char *options)
* low-power mode.
*/
if (stm32_port->rx_ch) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Poll data from DMA RX buffer if any */
if (!stm32_usart_rx_dma_pause(stm32_port))
size += stm32_usart_receive_chars(port, true);
struct tty_port *tport;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
tport = receive_chars(port);
transmit_chars(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (tport)
tty_flip_buffer_push(tport);
if (ch == __DISABLED_CHAR)
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (limit-- > 0) {
long status = sun4v_con_putchar(ch);
udelay(1);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* port->lock held by caller. */
unsigned long flags;
int limit = 10000;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (limit-- > 0) {
long status = sun4v_con_putchar(CON_BREAK);
udelay(1);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
}
unsigned int iflag, cflag;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
iflag = termios->c_iflag;
cflag = termios->c_cflag;
uart_update_timeout(port, cflag,
(port->uartclk / (16 * quot)));
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *sunhv_type(struct uart_port *port)
int locked = 1;
if (port->sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
int i, locked = 1;
if (port->sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
for (i = 0; i < n; i++) {
if (*s == '\n')
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static struct console sunhv_console = {
unsigned long flags;
unsigned int lcr;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
lcr = readl(port->membase + SUP_UART_LCR);
writel(lcr, port->membase + SUP_UART_LCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void transmit_chars(struct uart_port *port)
struct uart_port *port = args;
unsigned int isc;
- spin_lock(&port->lock);
+ uart_port_lock(port);
isc = readl(port->membase + SUP_UART_ISC);
if (isc & SUP_UART_ISC_TX)
transmit_chars(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
if (ret)
return ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* The ISC register defines Bit[7:4] as the interrupt settings and
* Bit[3:0] as the interrupt status. Reading clears the Bit[3:0]
* status, so only write the Bit[7:4] interrupt settings.
*/
isc |= SUP_UART_ISC_RXM;
writel(isc, port->membase + SUP_UART_ISC);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* The ISC register defines Bit[7:4] as the interrupt settings and
* Bit[3:0] as the interrupt status. Reading clears the Bit[3:0]
* status, so only write the Bit[7:4] interrupt settings.
*/
writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
lcr |= UART_LCR_EPAR;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
writel(div_l, port->membase + SUP_UART_DIV_L);
writel(lcr, port->membase + SUP_UART_LCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
if (sunplus_console_ports[co->index]->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock);
+ locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
else
- spin_lock(&sunplus_console_ports[co->index]->port.lock);
+ uart_port_lock(&sunplus_console_ports[co->index]->port);
uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
sunplus_uart_console_putchar);
if (locked)
- spin_unlock(&sunplus_console_ports[co->index]->port.lock);
+ uart_port_unlock(&sunplus_console_ports[co->index]->port);
local_irq_restore(flags);
}
unsigned long flags;
unsigned char gis;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
status.stat = 0;
gis = readb(&up->regs->r.gis) >> up->gis_shift;
transmit_chars(up, &status);
}
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
if (port)
tty_flip_buffer_push(port);
if (ch == __DISABLED_CHAR)
return;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
sunsab_tec_wait(up);
writeb(ch, &up->regs->w.tic);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/* port->lock held by caller. */
unsigned long flags;
unsigned char val;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
val = up->cached_dafo;
if (break_state)
if (test_bit(SAB82532_XPR, &up->irqflags))
sunsab_tx_idle(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/* port->lock is not held. */
if (err)
return err;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Wait for any commands or immediate characters
set_bit(SAB82532_ALLS, &up->irqflags);
set_bit(SAB82532_XPR, &up->irqflags);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/* Disable Interrupts */
up->interrupt_mask0 = 0xff;
writeb(tmp, &up->regs->rw.ccr0);
#endif
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
free_irq(up->port.irq, up);
}
unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
unsigned int quot = uart_get_divisor(port, baud);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *sunsab_type(struct uart_port *port)
int locked = 1;
if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int sunsab_console_setup(struct console *con, char *options)
*/
sunsab_startup(&up->port);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Finally, enable interrupts
sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
{
if (up->port.type == PORT_RSA) {
if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
__enable_rsa(up);
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
serial_outp(up, UART_RSA_FRR, 0);
if (up->port.type == PORT_RSA &&
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
+ uart_port_lock_irq(&up->port);
mode = serial_inp(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
if (result)
up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
- spin_unlock_irq(&up->port.lock);
+ uart_port_unlock_irq(&up->port);
}
}
#endif /* CONFIG_SERIAL_8250_RSA */
container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void
unsigned long flags;
unsigned char status;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
do {
status = serial_inp(up, UART_LSR);
} while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return IRQ_HANDLED;
}
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return ret;
}
container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int sunsu_startup(struct uart_port *port)
*/
serial_outp(up, UART_LCR, UART_LCR_WLEN8);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->port.mctrl |= TIOCM_OUT2;
sunsu_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Finally, enable interrupts. Note: Modem status interrupts
up->ier = 0;
serial_outp(up, UART_IER, 0);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (up->port.flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
inb((up->port.iobase & 0xfe0) | 0x1f);
up->port.mctrl &= ~TIOCM_OUT2;
sunsu_set_mctrl(&up->port, up->port.mctrl);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
/*
* Disable break condition and FIFOs
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* Update the per-port timeout.
up->cflag = cflag;
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static void
up->type_probed = PORT_UNKNOWN;
up->port.iotype = UPIO_MEM;
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (!(up->port.flags & UPF_BUGGY_UART)) {
/*
serial_outp(up, UART_IER, 0);
out:
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static struct uart_driver sunsu_reg = {
int locked = 1;
if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
/*
struct tty_port *port;
unsigned char r3;
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
r3 = read_zsreg(channel, R3);
/* Channel A */
if (r3 & CHATxIP)
sunzilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (port)
tty_flip_buffer_push(port);
up = up->next;
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);
port = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
writeb(RES_H_IUS, &channel->control);
if (r3 & CHBTxIP)
sunzilog_transmit_chars(up, channel);
}
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);
if (port)
tty_flip_buffer_push(port);
unsigned char status;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = sunzilog_read_channel_status(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (status & Tx_BUF_EMP)
ret = TIOCSER_TEMT;
else
clear_bits |= SND_BRK;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != up->curregs[R5]) {
write_zsreg(channel, R5, up->curregs[R5]);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void __sunzilog_startup(struct uart_sunzilog_port *up)
if (ZS_IS_CONS(up))
return 0;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
__sunzilog_startup(up);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
if (ZS_IS_CONS(up))
return;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
channel = ZILOG_CHANNEL_FROM_PORT(port);
up->curregs[R5] &= ~SND_BRK;
sunzilog_maybe_update_regs(up, channel);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/* Shared by TTY driver and serial console setup. The port lock is held
baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static const char *sunzilog_type(struct uart_port *port)
int locked = 1;
if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
up->curregs[R15] |= BRKIE;
sunzilog_convert_to_zs(up, con->cflag, 0, brg);
sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
__sunzilog_startup(up);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (ZS_IS_CHANNEL_A(up)) {
write_zsreg(channel, R9, FHWRES);
ZSDELAY_LONG();
write_zsreg(channel, R9, up->curregs[R9]);
}
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
#ifdef CONFIG_SERIO
if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
u32 isr, ier = 0;
- spin_lock(&uart->port.lock);
+ uart_port_lock(&uart->port);
isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
iowrite32(ier, uart->port.membase + TIMBUART_IER);
- spin_unlock(&uart->port.lock);
+ uart_port_unlock(&uart->port);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
}
tty_termios_copy_hw(termios, old);
tty_termios_encode_baud_rate(termios, baud, baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *timbuart_type(struct uart_port *port)
unsigned long flags;
do {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
stat = uart_in32(ULITE_STATUS, port);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
n++;
} while (busy);
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
ret = uart_in32(ULITE_STATUS, port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
}
termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
| ULITE_STATUS_TXFULL;
/* update timeout */
uart_update_timeout(port, termios->c_cflag, pdata->baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *ulite_type(struct uart_port *port)
int locked = 1;
if (oops_in_progress) {
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
} else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* save and disable interrupt */
ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int ulite_console_setup(struct console *co, char *options)
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
/* Do we really need a spinlock here? */
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
struct uart_port *port = dev_id;
unsigned long isr;
- spin_lock(&port->lock);
+ uart_port_lock(port);
isr = vt8500_read(port, VT8500_URISR);
/* Acknowledge active status bits */
if (isr & TCTS)
handle_delta_cts(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
unsigned int baud, lcr;
unsigned int loops = 1000;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* calculate and set baud rate */
baud = uart_get_baud_rate(port, termios, old, 900, 921600);
vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *vt8500_type(struct uart_port *port)
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int isrstatus;
- spin_lock(&port->lock);
+ uart_port_lock(port);
/* Read the interrupt status register to determine which
* interrupt(s) is/are active and clear them.
!(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}
return NOTIFY_BAD;
}
- spin_lock_irqsave(&cdns_uart->port->lock, flags);
+ uart_port_lock_irqsave(cdns_uart->port, &flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
- spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
+ uart_port_unlock_irqrestore(cdns_uart->port, flags);
return NOTIFY_OK;
}
* frequency.
*/
- spin_lock_irqsave(&cdns_uart->port->lock, flags);
+ uart_port_lock_irqsave(cdns_uart->port, &flags);
locked = 1;
port->uartclk = ndata->new_rate;
fallthrough;
case ABORT_RATE_CHANGE:
if (!locked)
- spin_lock_irqsave(&cdns_uart->port->lock, flags);
+ uart_port_lock_irqsave(cdns_uart->port, &flags);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
- spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
+ uart_port_unlock_irqrestore(cdns_uart->port, flags);
return NOTIFY_OK;
default:
unsigned int status;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
status = readl(port->membase + CDNS_UART_CR);
writel(CDNS_UART_CR_STOPBRK | status,
port->membase + CDNS_UART_CR);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
unsigned long flags;
unsigned int ctrl_reg, mode_reg;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
cval &= ~CDNS_UART_MODEMCR_FCM;
writel(cval, port->membase + CDNS_UART_MODEMCR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable the TX and RX */
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
writel(readl(port->membase + CDNS_UART_ISR),
port->membase + CDNS_UART_ISR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
if (ret) {
int status;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable interrupts */
status = readl(port->membase + CDNS_UART_IMR);
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
port->membase + CDNS_UART_CR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
free_irq(port->irq, port);
}
int c;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Check if FIFO is empty */
if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
else /* Read a character */
c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return c;
}
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Wait until FIFO is empty */
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
cpu_relax();
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* save and disable interrupt */
imr = readl(port->membase + CDNS_UART_IMR);
writel(imr, port->membase + CDNS_UART_IER);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/**
if (console_suspend_enabled && uart_console(port) && may_wake) {
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Empty the receive FIFO first before making changes */
while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY))
writel(1, port->membase + CDNS_UART_RXWM);
/* disable RX timeout interrupts */
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
return ret;
}
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* Set TX/RX Reset */
ctrl_reg = readl(port->membase + CDNS_UART_CR);
clk_disable(cdns_uart->uartclk);
clk_disable(cdns_uart->pclk);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
} else {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
/* restore original rx trigger level */
writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
/* enable RX timeout interrupt */
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
return uart_resume_port(cdns_uart->cdns_uart_driver, port);
for_each_console(c) {
if (!c->device)
continue;
- if (!c->write)
- continue;
+ if (c->flags & CON_NBCON) {
+ if (!c->write_atomic &&
+ !(c->write_thread && c->kthread)) {
+ continue;
+ }
+ } else {
+ if (!c->write)
+ continue;
+ }
if ((c->flags & CON_ENABLED) == 0)
continue;
cs[i++] = c;
{ CON_ENABLED, 'E' },
{ CON_CONSDEV, 'C' },
{ CON_BOOT, 'B' },
+ { CON_NBCON, 'N' },
{ CON_PRINTBUFFER, 'p' },
{ CON_BRL, 'b' },
{ CON_ANYTIME, 'a' },
};
char flags[ARRAY_SIZE(con_flags) + 1];
struct console *con = v;
+ char con_write = '-';
unsigned int a;
dev_t dev = 0;
seq_setwidth(m, 21 - 1);
seq_printf(m, "%s%d", con->name, con->index);
seq_pad(m, ' ');
- seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
- con->write ? 'W' : '-', con->unblank ? 'U' : '-',
- flags);
+ if (con->flags & CON_NBCON) {
+ if (con->write_atomic || con->write_thread)
+ con_write = 'W';
+ } else {
+ if (con->write)
+ con_write = 'W';
+ }
+ seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-', con_write,
+ con->unblank ? 'U' : '-', flags);
if (dev)
seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
#ifdef CONFIG_PREEMPT_RT
extern bool local_bh_blocked(void);
+extern void softirq_preempt(void);
#else
static inline bool local_bh_blocked(void) { return false; }
+static inline void softirq_preempt(void) { }
#endif
#endif /* _LINUX_BH_H */
#include <linux/atomic.h>
#include <linux/bits.h>
+#include <linux/irq_work.h>
#include <linux/rculist.h>
+#include <linux/rcuwait.h>
#include <linux/types.h>
struct vc_data;
* /dev/kmesg which requires a larger output buffer.
* @CON_SUSPENDED: Indicates if a console is suspended. If true, the
* printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non-takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * To be used for reading and preparing the value stored in the nbcon
+ * state variable @console::nbcon_state.
+ *
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to time out and give up without the risk of a waiter being
+ * assigned the lock after giving up.
+ */
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
+
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
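
For illustration, a minimal sketch of how the packing above is meant to be read; only atomic_read() and the field layout shown are assumed, and the helper itself is hypothetical:

static bool nbcon_state_example_unsafe(atomic_t *state)
{
	struct nbcon_state cur;

	/* Snapshot the compound value, then decode it via the bitfields. */
	cur.atom = atomic_read(state);

	return cur.unsafe || cur.unsafe_takeover;
}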
+
+/**
+ * nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can takeover the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
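
As a hedged usage sketch, an error path might bracket its output so nbcon consoles print at NBCON_PRIO_EMERGENCY; nbcon_cpu_emergency_enter()/exit() are declared further below, while the WARN/OOPS placement and the message are assumptions:

	/* Raise this CPU's printing priority around the report. */
	nbcon_cpu_emergency_enter();
	pr_emerg("example emergency report\n");	/* hypothetical message */
	nbcon_cpu_emergency_exit();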
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
};
/**
* @dropped: Number of unreported dropped ringbuffer records
* @data: Driver private data
* @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
+ * @write_thread: Write callback for non-atomic context
+ * @driver_enter: Callback to begin synchronization with driver code
+ * @driver_exit: Callback to finish synchronization with driver code
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
+ * @locked_port: True if the port lock is locked by nbcon
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
+ * @irq_work: Defer @kthread waking to IRQ work context
*/
struct console {
char name[16];
unsigned long dropped;
void *data;
struct hlist_node node;
+
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
+ bool (*write_thread)(struct console *con,
+ struct nbcon_write_context *wctxt);
+ void (*driver_enter)(struct console *con, unsigned long *flags);
+ void (*driver_exit)(struct console *con, unsigned long flags);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
+ bool locked_port;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
+ struct irq_work irq_work;
};
#ifdef CONFIG_LOCKDEP
lockdep_assert_console_list_lock_held(); \
hlist_for_each_entry(con, &console_list, node)
+#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+extern void nbcon_reacquire(struct nbcon_write_context *wctxt);
+#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_reacquire(struct nbcon_write_context *wctxt) { }
+#endif
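
To show how these helpers compose with struct nbcon_write_context, here is a minimal, hypothetical write_atomic() callback; hw_emit_locked() stands in for a driver's real register-level output routine and is an assumption:

static bool example_write_atomic(struct console *con,
				 struct nbcon_write_context *wctxt)
{
	/* Mark the non-takeover region around raw hardware access. */
	if (!nbcon_enter_unsafe(wctxt))
		return false;	/* ownership was lost, abort the write */

	hw_emit_locked(con, wctxt->outbuf, wctxt->len);

	/* Leave the unsafe region; reports whether ownership survived. */
	return nbcon_exit_unsafe(wctxt);
}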
+
extern int console_set_on_cmdline;
extern struct console *early_console;
#define EXIT_TO_USER_MODE_WORK \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
- ARCH_EXIT_TO_USER_MODE_WORK)
+ _TIF_NEED_RESCHED_LAZY | ARCH_EXIT_TO_USER_MODE_WORK)
/**
* arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
#define XFER_TO_GUEST_MODE_WORK \
(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
- _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
+ _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY | ARCH_XFER_TO_GUEST_MODE_WORK)
struct kvm_vcpu;
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+#ifdef CONFIG_PREEMPT_RT
+DECLARE_PER_CPU(struct task_struct *, timersd);
+DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
+
+extern void raise_timer_softirq(void);
+extern void raise_hrtimer_softirq(void);
+
+static inline unsigned int local_pending_timers(void)
+{
+ return __this_cpu_read(pending_timer_softirq);
+}
+
+#else
+static inline void raise_timer_softirq(void)
+{
+ raise_softirq(TIMER_SOFTIRQ);
+}
+
+static inline void raise_hrtimer_softirq(void)
+{
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
+static inline unsigned int local_pending_timers(void)
+{
+ return local_softirq_pending();
+}
+#endif
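
A short, hedged sketch of the payoff: callers raise the hrtimer softirq the same way on both configurations, with the pending check going through local_pending_timers(). The irqs-off expiry-path placement is an assumption:

static inline void example_defer_hrtimer_work(void)
{
	/* Assumes hard-irq context with interrupts disabled. */
	if (!(local_pending_timers() & BIT(HRTIMER_SOFTIRQ)))
		raise_hrtimer_softirq();
}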
+
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)
int defer_count;
int defer_ipi_scheduled;
struct sk_buff *defer_list;
+#ifndef CONFIG_PREEMPT_RT
call_single_data_t defer_csd;
+#else
+ struct work_struct defer_work;
+#endif
};
static inline void input_queue_head_incr(struct softnet_data *sd)
#include <linux/ratelimit_types.h>
#include <linux/once_lite.h>
+struct uart_port;
+
extern const char linux_banner[];
extern const char linux_proc_banner[];
extern void __printk_safe_enter(void);
extern void __printk_safe_exit(void);
+extern void __printk_deferred_enter(void);
+extern void __printk_deferred_exit(void);
+
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
* must be disabled for the deferred duration.
*/
-#define printk_deferred_enter __printk_safe_enter
-#define printk_deferred_exit __printk_safe_exit
+#define printk_deferred_enter() __printk_deferred_enter()
+#define printk_deferred_exit() __printk_deferred_exit()
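
A hedged sketch of the intended usage, following the comment above that interrupts must stay disabled for the whole deferred span; the surrounding lock scenario is an assumption:

	unsigned long flags;

	local_irq_save(flags);
	printk_deferred_enter();
	printk("state dump while holding a console-shared lock\n");
	printk_deferred_exit();
	local_irq_restore(flags);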
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
+void printk_legacy_allow_panic_sync(void);
+extern void nbcon_acquire(struct uart_port *up);
+extern void nbcon_release(struct uart_port *up);
+void nbcon_atomic_flush_unsafe(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
static inline void printk_trigger_flush(void)
{
}
+
+static inline void printk_legacy_allow_panic_sync(void)
+{
+}
+
+static inline void nbcon_acquire(struct uart_port *up)
+{
+}
+
+static inline void nbcon_release(struct uart_port *up)
+{
+}
+
+static inline void nbcon_atomic_flush_unsafe(void)
+{
+}
+
#endif
#ifdef CONFIG_SMP
* ->sched_remote_wakeup gets used, so it can be in this word.
*/
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+ unsigned sched_rt_mutex:1;
+#endif
/* Bit to tell LSMs we're in execve(): */
unsigned in_execve:1;
}
#endif
+extern bool task_is_pi_boosted(const struct task_struct *p);
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
update_ti_thread_flag(task_thread_info(tsk), flag, value);
}
-static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
+static inline bool test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
-static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+static inline bool test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
-static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
+static inline bool test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+ if (IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO))
+ clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}
-static inline int test_tsk_need_resched(struct task_struct *tsk)
+static inline bool test_tsk_need_resched(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
static __always_inline bool need_resched(void)
{
- return unlikely(tif_need_resched());
+ return unlikely(tif_need_resched_lazy() || tif_need_resched());
}
/*
*/
smp_mb__after_atomic();
- return unlikely(tif_need_resched());
+ return unlikely(need_resched());
}
static __always_inline bool __must_check current_clr_polling_and_test(void)
*/
smp_mb__after_atomic();
- return unlikely(tif_need_resched());
+ return unlikely(need_resched());
}
#else
static inline bool __must_check current_set_polling_and_test(void)
{
- return unlikely(tif_need_resched());
+ return unlikely(need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
- return unlikely(tif_need_resched());
+ return unlikely(need_resched());
}
#endif
}
#ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
*/
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
+ bool console_newline_needed;
+
struct uart_8250_dma *dma;
const struct uart_8250_ops *ops;
void serial8250_set_defaults(struct uart_8250_port *up);
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count);
+bool serial8250_console_write_atomic(struct uart_8250_port *up,
+ struct nbcon_write_context *wctxt);
+bool serial8250_console_write_thread(struct uart_8250_port *up,
+ struct nbcon_write_context *wctxt);
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
int serial8250_console_exit(struct uart_port *port);
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
+ nbcon_acquire(up);
}
/**
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
+ nbcon_acquire(up);
}
/**
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
+ nbcon_acquire(up);
}
/**
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
- return spin_trylock(&up->lock);
+ if (!spin_trylock(&up->lock))
+ return false;
+
+ nbcon_acquire(up);
+ return true;
}
/**
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
- return spin_trylock_irqsave(&up->lock, *flags);
+ if (!spin_trylock_irqsave(&up->lock, *flags))
+ return false;
+
+ nbcon_acquire(up);
+ return true;
}
/**
*/
static inline void uart_port_unlock(struct uart_port *up)
{
+ nbcon_release(up);
spin_unlock(&up->lock);
}
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
+ nbcon_release(up);
spin_unlock_irq(&up->lock);
}
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
+ nbcon_release(up);
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
+/* Only for use in the console->driver_enter() callback. */
+static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/* Only for use in the console->driver_exit() callback. */
+static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
spin_unlock_irqrestore(&up->lock, flags);
}
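A hedged usage sketch (the driver and its IRQ handler are hypothetical): an interrupt handler that takes the port lock through these wrappers acquires and releases nbcon ownership together with the spinlock, with no driver changes beyond using the wrappers:

	static irqreturn_t foo_uart_irq(int irq, void *dev_id)	/* hypothetical */
	{
		struct uart_port *port = dev_id;
		unsigned long flags;

		uart_port_lock_irqsave(port, &flags);	  /* spinlock + nbcon_acquire() */
		/* ... service RX/TX FIFOs ... */
		uart_port_unlock_irqrestore(port, flags); /* nbcon_release() + unlock */
		return IRQ_HANDLED;
	}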
u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
#include <asm/thread_info.h>
+#ifdef CONFIG_PREEMPT_BUILD_AUTO
+# define TIF_NEED_RESCHED_LAZY TIF_ARCH_RESCHED_LAZY
+# define _TIF_NEED_RESCHED_LAZY _TIF_ARCH_RESCHED_LAZY
+# define TIF_NEED_RESCHED_LAZY_OFFSET (TIF_NEED_RESCHED_LAZY - TIF_NEED_RESCHED)
+#else
+# define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
+# define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
+# define TIF_NEED_RESCHED_LAZY_OFFSET 0
+#endif
+
#ifdef __KERNEL__
#ifndef arch_set_restart_data
(unsigned long *)(&current_thread_info()->flags));

}
+static __always_inline bool tif_need_resched_lazy(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
+ arch_test_bit(TIF_NEED_RESCHED_LAZY,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
#else
static __always_inline bool tif_need_resched(void)
(unsigned long *)(&current_thread_info()->flags));
}
+static __always_inline bool tif_need_resched_lazy(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
+ test_bit(TIF_NEED_RESCHED_LAZY,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
- TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_NEED_RESCHED = 0x02,
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+ return tracing_gen_ctx_irq_test(0);
}
static inline unsigned int tracing_gen_ctx(void)
{
- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+ return tracing_gen_ctx_irq_test(0);
}
#endif
select PREEMPTION
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+config PREEMPT_BUILD_AUTO
+ bool
+ select PREEMPT_BUILD
+
+config HAVE_PREEMPT_AUTO
+ bool
+
choice
prompt "Preemption Model"
default PREEMPT_NONE
embedded system with latency requirements in the milliseconds
range.
+config PREEMPT_AUTO
+ bool "Automagic preemption mode with runtime tweaking support"
+ depends on HAVE_PREEMPT_AUTO
+ select PREEMPT_BUILD_AUTO
+ help
+ Preemption mode in which the kernel is built fully preemptible, but
+ the scheduler decides at run time whether a task is preempted
+ immediately (TIF_NEED_RESCHED) or lazily on the next return to
+ user space (TIF_NEED_RESCHED_LAZY).
+
config PREEMPT_RT
bool "Fully Preemptible Kernel (Real-Time)"
depends on EXPERT && ARCH_SUPPORTS_RT
+ select PREEMPT_BUILD_AUTO if HAVE_PREEMPT_AUTO
select PREEMPTION
help
This option turns the kernel into a real-time kernel by replacing
config PREEMPT_DYNAMIC
bool "Preemption behaviour defined on boot"
- depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
+ depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT && !PREEMPT_AUTO
select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
select PREEMPT_BUILD
default y if HAVE_PREEMPT_DYNAMIC_CALL
local_irq_enable_exit_to_user(ti_work);
- if (ti_work & _TIF_NEED_RESCHED)
+ if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
if (ti_work & _TIF_UPROBE)
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
- if (need_resched())
+ if (test_tsk_need_resched(current))
preempt_schedule_irq();
}
}
return -EINTR;
}
- if (ti_work & _TIF_NEED_RESCHED)
+ if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
if (ti_work & _TIF_NOTIFY_RESUME)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/slab.h>
+#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include "futex.h"
/*
* Caller must hold a reference on @pi_state.
*/
-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
+static int wake_futex_pi(u32 __user *uaddr, u32 uval,
+ struct futex_pi_state *pi_state,
+ struct rt_mutex_waiter *top_waiter)
{
- struct rt_mutex_waiter *top_waiter;
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_RT_WAKE_Q(wqh);
u32 curval, newval;
int ret = 0;
- top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
- if (WARN_ON_ONCE(!top_waiter)) {
- /*
- * As per the comment in futex_unlock_pi() this should not happen.
- *
- * When this happens, give up our locks and try again, giving
- * the futex_lock_pi() instance time to complete, either by
- * waiting on the rtmutex or removing itself from the futex
- * queue.
- */
- ret = -EAGAIN;
- goto out_unlock;
- }
-
new_owner = top_waiter->task;
/*
goto no_block;
}
+ /*
+ * Must be done before we enqueue the waiter; this is unfortunately
+ * under the hb lock, but that *should* work because it does nothing.
+ */
+ rt_mutex_pre_schedule();
+
rt_mutex_init_waiter(&rt_waiter);
/*
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
cleanup:
- spin_lock(q.lock_ptr);
/*
* If we failed to acquire the lock (deadlock/signal/timeout), we must
- * first acquire the hb->lock before removing the lock from the
- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
- * lists consistent.
+ * unwind the above; however, we cannot lock hb->lock because
+ * rt_mutex already has a waiter enqueued and hb->lock can itself try
+ * and enqueue an rt_waiter through rtlock.
+ *
+ * Doing the cleanup without holding hb->lock can cause inconsistent
+ * state between hb and pi_state, but only in the direction of not
+ * seeing a waiter that is leaving.
+ *
+ * See futex_unlock_pi(), it deals with this inconsistency.
+ *
+ * There be dragons here, since we must deal with the inconsistency on
+ * the way out (here), it is impossible to detect/warn about the race
+ * the other way around (missing an incoming waiter).
*
- * In particular; it is important that futex_unlock_pi() can not
- * observe this inconsistency.
+ * What could possibly go wrong...
*/
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
ret = 0;
+ /*
+ * Now that the rt_waiter has been dequeued, it is safe to use
+ * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
+ * the pi_state.
+ */
+ spin_lock(q.lock_ptr);
+ /*
+ * Waiter is unqueued.
+ */
+ rt_mutex_post_schedule();
no_block:
/*
* Fixup the pi_state owner and possibly acquire the lock if we
top_waiter = futex_top_waiter(hb, &key);
if (top_waiter) {
struct futex_pi_state *pi_state = top_waiter->pi_state;
+ struct rt_mutex_waiter *rt_waiter;
ret = -EINVAL;
if (!pi_state)
if (pi_state->owner != current)
goto out_unlock;
- get_pi_state(pi_state);
/*
* By taking wait_lock while still holding hb->lock, we ensure
- * there is no point where we hold neither; and therefore
- * wake_futex_p() must observe a state consistent with what we
- * observed.
+ * there is no point where we hold neither; and thereby
+ * wake_futex_pi() must observe any new waiters.
+ *
+ * Since the cleanup: case in futex_lock_pi() removes the
+ * rt_waiter without holding hb->lock, it is possible for
+ * wake_futex_pi() to not find a waiter while the above does,
+ * in this case the waiter is on the way out and it can be
+ * ignored.
*
* In particular; this forces __rt_mutex_start_proxy() to
* complete such that we're guaranteed to observe the
- * rt_waiter. Also see the WARN in wake_futex_pi().
+ * rt_waiter.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+
+ /*
+ * Futex vs rt_mutex waiter state -- if there are no rt_mutex
+ * waiters even though futex thinks there are, then the waiter
+ * is leaving and the uncontended path is safe to take.
+ */
+ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
+ if (!rt_waiter) {
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ goto do_uncontended;
+ }
+
+ get_pi_state(pi_state);
spin_unlock(&hb->lock);
/* drops pi_state->pi_mutex.wait_lock */
- ret = wake_futex_pi(uaddr, uval, pi_state);
+ ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);
put_pi_state(pi_state);
return ret;
}
+do_uncontended:
/*
* We have no kernel internal state, i.e. no waiters in the
* kernel. Waiters which are about to queue themselves are stuck
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
- /* Current is not longer pi_blocked_on */
- spin_lock(q.lock_ptr);
+ /*
+ * See futex_unlock_pi()'s cleanup: comment.
+ */
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;
+ spin_lock(q.lock_ptr);
debug_rt_mutex_free_waiter(&rt_waiter);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
#endif /* CONFIG_CRASH_CORE */
+#if defined(CONFIG_PREEMPT_RT)
+static ssize_t realtime_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", 1);
+}
+KERNEL_ATTR_RO(realtime);
+#endif
+
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
#endif
+#ifdef CONFIG_PREEMPT_RT
+ &realtime_attr.attr,
+#endif
NULL
};
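Userspace can use the new attribute to detect an RT kernel; a minimal sketch (the path and the value "1" follow from the code above, the program itself is illustrative):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/realtime", "r");
		int rt = 0;

		/* The attribute exists only on PREEMPT_RT kernels and reads "1". */
		if (f && fscanf(f, "%d", &rt) != 1)
			rt = 0;
		if (f)
			fclose(f);
		printf("PREEMPT_RT: %s\n", rt ? "yes" : "no");
		return 0;
	}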
#include <linux/kprobes.h>
#include <linux/lockdep.h>
#include <linux/context_tracking.h>
+#include <linux/console.h>
#include <asm/sections.h>
if (!debug_locks_off() || debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("================================\n");
pr_warn("WARNING: inconsistent lock state\n");
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
/*
return try_cmpxchg_acquire(&lock->owner, &old, new);
}
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
+{
+ return rt_mutex_cmpxchg_acquire(lock, NULL, current);
+}
+
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
}
+static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
+
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
+{
+ /*
+ * With debug enabled the rt_mutex_cmpxchg_acquire() fast path above
+ * will always fail.
+ *
+ * Avoid unconditionally taking the slow path by using
+ * rt_mutex_slowtrylock(), which is covered by the debug code and can
+ * acquire a non-contended rtmutex.
+ */
+ return rt_mutex_slowtrylock(lock);
+}
+
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
raw_spin_unlock_irq(&lock->wait_lock);
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
- schedule();
+ rt_mutex_schedule();
raw_spin_lock_irq(&lock->wait_lock);
set_current_state(state);
WARN(1, "rtmutex deadlock detected\n");
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ rt_mutex_schedule();
}
}
int ret;
/*
+ * Do all pre-schedule work here, before we queue a waiter and invoke
+ * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
+ * otherwise recurse back into task_blocks_on_rt_mutex() through
+ * rtlock_slowlock() and will then enqueue a second waiter for this
+ * same task and things get really confusing real fast.
+ */
+ rt_mutex_pre_schedule();
+
+ /*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
* be called in early boot if the cmpxchg() fast path is disabled
* (debug, no architecture support). In this case we will acquire the
raw_spin_lock_irqsave(&lock->wait_lock, flags);
ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ rt_mutex_post_schedule();
return ret;
}
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
unsigned int state)
{
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ lockdep_assert(!current->pi_blocked_on);
+
+ if (likely(rt_mutex_try_acquire(lock)))
return 0;
return rt_mutex_slowlock(lock, NULL, state);
struct rt_mutex_base *rtm = &rwb->rtmutex;
int ret;
+ rwbase_pre_schedule();
raw_spin_lock_irq(&rtm->wait_lock);
/*
rwbase_rtmutex_unlock(rtm);
trace_contention_end(rwb, ret);
+ rwbase_post_schedule();
return ret;
}
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
unsigned int state)
{
+ lockdep_assert(!current->pi_blocked_on);
+
if (rwbase_read_trylock(rwb))
return 0;
/* Force readers into slow path */
atomic_sub(READER_BIAS, &rwb->readers);
+ rwbase_pre_schedule();
+
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
if (__rwbase_write_trylock(rwb))
goto out_unlock;
if (rwbase_signal_pending_state(state, current)) {
rwbase_restore_current_state();
__rwbase_write_unlock(rwb, 0, flags);
+ rwbase_post_schedule();
trace_contention_end(rwb, -EINTR);
return -EINTR;
}
out_unlock:
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+ rwbase_post_schedule();
return 0;
}
#define rwbase_signal_pending_state(state, current) \
signal_pending_state(state, current)
+#define rwbase_pre_schedule() \
+ rt_mutex_pre_schedule()
+
#define rwbase_schedule() \
- schedule()
+ rt_mutex_schedule()
+
+#define rwbase_post_schedule() \
+ rt_mutex_post_schedule()
#include "rwbase_rt.c"
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
+ lockdep_assert(!current->pi_blocked_on);
+
if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
rtlock_slowlock(rtm);
}
#define rwbase_signal_pending_state(state, current) (0)
+#define rwbase_pre_schedule()
+
#define rwbase_schedule() \
schedule_rtlock()
+#define rwbase_post_schedule()
+
#include "rwbase_rt.c"
/*
* The common functions which get wrapped into the rwlock API.
}
mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
- if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
+ if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
if (ww_ctx)
ww_mutex_set_context_fastpath(lock, ww_ctx);
return 0;
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+ printk_legacy_allow_panic_sync();
+
panic_print_sys_info(false);
kmsg_dump(KMSG_DUMP_PANIC);
/* Do not scroll important messages printed above */
suppress_printk = 1;
+
+ /*
+ * The final messages may not have been printed if in a context that
+ * defers printing (such as NMI) and irq_work is not available.
+ * Explicitly flush the kernel log buffer one last time.
+ */
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+ nbcon_atomic_flush_unsafe();
+
local_irq_enable();
for (i = 0; ; i += PANIC_TIMER_STEP) {
touch_softlockup_watchdog();
*/
void oops_enter(void)
{
+ nbcon_cpu_emergency_enter();
tracing_off();
/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
{
do_oops_enter_exit();
print_oops_end_marker();
+ nbcon_cpu_emergency_exit();
kmsg_dump(KMSG_DUMP_OOPS);
}
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args)
{
+ nbcon_cpu_emergency_enter();
+
disable_trace_on_warning();
if (file)
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
+
+ nbcon_cpu_emergency_exit();
}
#ifdef CONFIG_BUG
# SPDX-License-Identifier: GPL-2.0-only
obj-y = printk.o
-obj-$(CONFIG_PRINTK) += printk_safe.o
+obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
obj-$(CONFIG_PRINTK_INDEX) += index.o
* internal.h - printk internal definitions
*/
#include <linux/percpu.h>
+#include <linux/console.h>
+#include "printk_ringbuffer.h"
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
void __init printk_sysctl_init(void);
#define printk_sysctl_init() do { } while (0)
#endif
+#define con_printk(lvl, con, fmt, ...) \
+ printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \
+ (con->flags & CON_NBCON) ? "" : "legacy ", \
+ (con->flags & CON_BOOT) ? "boot" : "", \
+ con->name, con->index, ##__VA_ARGS__)
+
#ifdef CONFIG_PRINTK
#ifdef CONFIG_PRINTK_CALLER
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
+extern struct printk_ringbuffer *prb;
+extern bool printk_threads_enabled;
+extern bool have_legacy_console;
+extern bool have_boot_console;
+
+/*
+ * Specifies if the console lock/unlock dance is needed for console
+ * printing. If @have_boot_console is true, the nbcon consoles will
+ * be printed serially along with the legacy consoles because nbcon
+ * consoles cannot print simultaneously with boot consoles.
+ */
+#define printing_via_unlock (have_legacy_console || have_boot_console)
+
__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
u16 printk_parse_prefix(const char *text, int *level,
enum printk_info_flags *flags);
+void console_lock_spinning_enable(void);
+int console_lock_spinning_disable_and_check(int cookie);
+
+u64 nbcon_seq_read(struct console *con);
+void nbcon_seq_force(struct console *con, u64 seq);
+bool nbcon_alloc(struct console *con);
+void nbcon_init(struct console *con);
+void nbcon_free(struct console *con);
+enum nbcon_prio nbcon_get_default_prio(void);
+void nbcon_atomic_flush_all(void);
+bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+void nbcon_kthread_create(struct console *con);
+void nbcon_wake_threads(void);
+void nbcon_legacy_kthread_create(void);
+
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+ if (flags & CON_NBCON) {
+ if (use_atomic) {
+ if (!con->write_atomic)
+ return false;
+ } else {
+ if (!con->write_thread || !con->kthread)
+ return false;
+ }
+ } else {
+ if (!con->write)
+ return false;
+ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
+
+/**
+ * nbcon_kthread_wake - Wake up a printk thread
+ * @con: Console to operate on
+ */
+static inline void nbcon_kthread_wake(struct console *con)
+{
+ /*
+ * Guarantee any new records can be seen by tasks preparing to wait
+ * before this context checks if the rcuwait is empty.
+ *
+ * The full memory barrier in rcuwait_wake_up() pairs with the full
+ * memory barrier within set_current_state() of
+ * ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
+ * adds the waiter but before it has checked the wait condition.
+ *
+ * This pairs with nbcon_kthread_func:A.
+ */
+ rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
+}
+
#else
#define PRINTK_PREFIX_MAX 0
#define PRINTK_MESSAGE_MAX 0
#define PRINTKRB_RECORD_MAX 0
+static inline void nbcon_kthread_wake(struct console *con) { }
+static inline void nbcon_kthread_create(struct console *con) { }
+#define printk_threads_enabled (false)
+#define printing_via_unlock (false)
+
/*
* In !PRINTK builds we still export console_sem
* semaphore and some of console functions (console_unlock()/etc.), so
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
static inline bool printk_percpu_data_ready(void) { return false; }
+static inline u64 nbcon_seq_read(struct console *con) { return 0; }
+static inline void nbcon_seq_force(struct console *con, u64 seq) { }
+static inline bool nbcon_alloc(struct console *con) { return false; }
+static inline void nbcon_init(struct console *con) { }
+static inline void nbcon_free(struct console *con) { }
+static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
+static inline void nbcon_atomic_flush_all(void) { }
+static inline bool nbcon_atomic_emit_next_record(struct console *con, bool *handover,
+ int cookie) { return false; }
+
+static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
+
#endif /* CONFIG_PRINTK */
+extern struct printk_buffers printk_shared_pbufs;
+
/**
* struct printk_buffers - Buffers to read/format/output printk messages.
* @outbuf: After formatting, contains text to output.
};
bool other_cpu_in_panic(void);
+bool this_cpu_in_panic(void);
+bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ bool is_extended, bool may_suppress);
+
+#ifdef CONFIG_PRINTK
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH, John Ogness
+// Copyright (C) 2022 Intel, Thomas Gleixner
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/serial_core.h>
+#include <linux/syscore_ops.h>
+#include "printk_ringbuffer.h"
+#include "internal.h"
+/*
+ * Printk console printing implementation for consoles that do not depend
+ * on the legacy style console_lock mechanism.
+ *
+ * The state of the console is maintained in the "nbcon_state" atomic
+ * variable.
+ *
+ * The console is locked when:
+ *
+ * - The 'prio' field contains the priority of the context that owns the
+ * console. Only higher priority contexts are allowed to take over the
+ * lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
+ *
+ * - The 'cpu' field denotes on which CPU the console is locked. It is used
+ * to prevent busy waiting on the same CPU. Also it informs the lock owner
+ * that it has lost the lock in a more complex scenario when the lock was
+ * taken over by a higher priority context, released, and taken on another
+ * CPU with the same priority as the interrupted owner.
+ *
+ * The acquire mechanism uses a few more fields:
+ *
+ * - The 'req_prio' field is used by the handover approach to make the
+ * current owner aware that there is a context with a higher priority
+ * waiting for the friendly handover.
+ *
+ * - The 'unsafe' field allows taking over the console in a safe way in the
+ * middle of emitting a message. The field is set only when accessing some
+ * shared resources or when the console device is manipulated. It can be
+ * cleared, for example, after emitting one character when the console
+ * device is in a consistent state.
+ *
+ * - The 'unsafe_takeover' field is set when a hostile takeover took the
+ * console while it was in an unsafe state. The console will stay unsafe
+ * until re-initialized.
+ *
+ * The acquire mechanism uses three approaches:
+ *
+ * 1) Direct acquire when the console is not owned or is owned by a lower
+ * priority context and is in a safe state.
+ *
+ * 2) Friendly handover mechanism uses a request/grant handshake. It is used
+ * when the current owner has lower priority and the console is in an
+ * unsafe state.
+ *
+ * The requesting context:
+ *
+ * a) Sets its priority into the 'req_prio' field.
+ *
+ * b) Waits (with a timeout) for the owning context to unlock the
+ * console.
+ *
+ * c) Takes the lock and clears the 'req_prio' field.
+ *
+ * The owning context:
+ *
+ * a) Observes the 'req_prio' field set on exit from the unsafe
+ * console state.
+ *
+ * b) Gives up console ownership by clearing the 'prio' field.
+ *
+ * 3) Unsafe hostile takeover allows taking over the lock even when the
+ * console is in an unsafe state. It is used only in panic() by the final
+ * attempt to flush consoles in a try-and-hope mode.
+ *
+ * Note that separate record buffers are used in panic(). As a result,
+ * the messages can be read and formatted without any risk even after
+ * using the hostile takeover in unsafe state.
+ *
+ * The release function simply clears the 'prio' field.
+ *
+ * All operations on @console::nbcon_state are atomic cmpxchg based to
+ * handle concurrency.
+ *
+ * The acquire/release functions implement only minimal policies:
+ *
+ * - Preference for higher priority contexts.
+ * - Protection of the panic CPU.
+ *
+ * All other policy decisions must be made at the call sites:
+ *
+ * - What is marked as an unsafe section.
+ * - Whether to spin-wait if there is already an owner and the console is
+ * in an unsafe state.
+ * - Whether to attempt an unsafe hostile takeover.
+ *
+ * The design allows implementing the well-known pattern:
+ *
+ * acquire()
+ * output_one_printk_record()
+ * release()
+ *
+ * The output of one printk record might be interrupted by a higher priority
+ * context. The new owner is supposed to reprint the entire interrupted record
+ * from scratch.
+ */
+
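A hedged sketch of this pattern from a console driver's perspective (the device helper and driver are hypothetical; nbcon_enter_unsafe()/nbcon_exit_unsafe() are the functions implemented below):

	/* Hedged sketch only: one record emitted by a write callback. */
	static bool foo_write_atomic(struct console *con,
				     struct nbcon_write_context *wctxt)
	{
		/*
		 * Hardware access starts: mark the section unsafe. A false
		 * return means ownership was lost; back out immediately.
		 */
		if (!nbcon_enter_unsafe(wctxt))
			return false;

		foo_uart_emit(con, wctxt->outbuf, wctxt->len);	/* hypothetical */

		/* Device is consistent again: allow a friendly handover. */
		return nbcon_exit_unsafe(wctxt);
	}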
+/**
+ * nbcon_state_set - Helper function to set the console state
+ * @con: Console to update
+ * @new: The new state to write
+ *
+ * Only to be used when the console is not yet or no longer visible in the
+ * system. Otherwise use nbcon_state_try_cmpxchg().
+ */
+static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
+{
+ atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
+}
+
+/**
+ * nbcon_state_read - Helper function to read the console state
+ * @con: Console to read
+ * @state: The state to store the result
+ */
+static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
+{
+ state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
+}
+
+/**
+ * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
+ * @con: Console to update
+ * @cur: Old/expected state
+ * @new: New state
+ *
+ * Return: True on success. False on fail and @cur is updated.
+ */
+static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
+ struct nbcon_state *new)
+{
+ return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
+}
+
+/**
+ * nbcon_seq_read - Read the current console sequence
+ * @con: Console to read the sequence of
+ *
+ * Return: Sequence number of the next record to print on @con.
+ */
+u64 nbcon_seq_read(struct console *con)
+{
+ unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
+
+ return __ulseq_to_u64seq(prb, nbcon_seq);
+}
+
+/**
+ * nbcon_seq_force - Force console sequence to a specific value
+ * @con: Console to work on
+ * @seq: Sequence number value to set
+ *
+ * Only to be used during init (before registration) or in extreme situations
+ * (such as panic with CONSOLE_REPLAY_ALL).
+ */
+void nbcon_seq_force(struct console *con, u64 seq)
+{
+ /*
+ * If the specified record no longer exists, the oldest available record
+ * is chosen. This is especially important on 32bit systems because only
+ * the lower 32 bits of the sequence number are stored. The upper 32 bits
+ * are derived from the sequence numbers available in the ringbuffer.
+ */
+ u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
+
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
+
+ /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
+ con->seq = 0;
+}
+
+/**
+ * nbcon_seq_try_update - Try to update the console sequence number
+ * @ctxt: Pointer to an acquire context that contains
+ * all information about the acquire mode
+ * @new_seq: The new sequence number to set
+ *
+ * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
+ * the 64bit value). This could be a different value than @new_seq if
+ * nbcon_seq_force() was used or the current context no longer owns the
+ * console. In the latter case, it will stop printing anyway.
+ */
+static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+{
+ unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
+ struct console *con = ctxt->console;
+
+ if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
+ __u64seq_to_ulseq(new_seq))) {
+ ctxt->seq = new_seq;
+ } else {
+ ctxt->seq = nbcon_seq_read(con);
+ }
+}
+
+bool printk_threads_enabled __ro_after_init;
+
+/**
+ * nbcon_context_try_acquire_direct - Try to acquire directly
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * Acquire the console when it is released. Also acquire the console when
+ * the current owner has a lower priority and the console is in a safe state.
+ *
+ * Return: 0 on success. Otherwise, an error code on failure. Also @cur
+ * is updated to the latest state when the modification failed.
+ *
+ * Errors:
+ *
+ * -EPERM: A panic is in progress and this is not the panic CPU.
+ * Or the current owner or waiter has the same or higher
+ * priority. No acquire method can be successful in
+ * this case.
+ *
+ * -EBUSY: The current owner has a lower priority but the console
+ * is in an unsafe state. The caller should try using
+ * the handover acquire method.
+ */
+static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+
+ do {
+ if (other_cpu_in_panic())
+ return -EPERM;
+
+ if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
+ return -EPERM;
+
+ if (cur->unsafe)
+ return -EBUSY;
+
+ /*
+ * The console should never be safe for a direct acquire
+ * if an unsafe hostile takeover has ever happened.
+ */
+ WARN_ON_ONCE(cur->unsafe_takeover);
+
+ new.atom = cur->atom;
+ new.prio = ctxt->prio;
+ new.req_prio = NBCON_PRIO_NONE;
+ new.unsafe = cur->unsafe_takeover;
+ new.cpu = cpu;
+
+ } while (!nbcon_state_try_cmpxchg(con, cur, &new));
+
+ return 0;
+}
+
+static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
+{
+ /*
+ * The request context is well defined by the @req_prio because:
+ *
+ * - Only a context with a higher priority can take over the request.
+ * - There are only three priorities.
+ * - Only one CPU is allowed to request PANIC priority.
+ * - Lower priorities are ignored during panic() until reboot.
+ *
+ * As a result, the following scenario is *not* possible:
+ *
+ * 1. Another context with a higher priority directly takes ownership.
+ * 2. The higher priority context releases the ownership.
+ * 3. A lower priority context takes the ownership.
+ * 4. Another context with the same priority as this context
+ * creates a request and starts waiting.
+ */
+
+ return (cur->req_prio == expected_prio);
+}
+
+/**
+ * nbcon_context_try_acquire_requested - Try to acquire after having
+ * requested a handover
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * This is a helper function for nbcon_context_try_acquire_handover().
+ * It is called when the console is in an unsafe state. The current
+ * owner will release the console on exit from the unsafe region.
+ *
+ * Return: 0 on success and @cur is updated to the new console state.
+ * Otherwise an error code on failure.
+ *
+ * Errors:
+ *
+ * -EPERM: A panic is in progress and this is not the panic CPU
+ * or this context is no longer the waiter.
+ *
+ * -EBUSY: The console is still locked. The caller should
+ * continue waiting.
+ *
+ * Note: The caller must still remove the request when an error has occurred
+ * except when this context is no longer the waiter.
+ */
+static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+
+ /* Note that the caller must still remove the request! */
+ if (other_cpu_in_panic())
+ return -EPERM;
+
+ /*
+ * Note that the waiter will also change if there was an unsafe
+ * hostile takeover.
+ */
+ if (!nbcon_waiter_matches(cur, ctxt->prio))
+ return -EPERM;
+
+ /* If still locked, caller should continue waiting. */
+ if (cur->prio != NBCON_PRIO_NONE)
+ return -EBUSY;
+
+ /*
+ * The previous owner should have never released ownership
+ * in an unsafe region.
+ */
+ WARN_ON_ONCE(cur->unsafe);
+
+ new.atom = cur->atom;
+ new.prio = ctxt->prio;
+ new.req_prio = NBCON_PRIO_NONE;
+ new.unsafe = cur->unsafe_takeover;
+ new.cpu = cpu;
+
+ if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
+ /*
+ * The acquire could fail only when it has been taken
+ * over by a higher priority context.
+ */
+ WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
+ return -EPERM;
+ }
+
+ /* Handover success. This context now owns the console. */
+ return 0;
+}
+
+/**
+ * nbcon_context_try_acquire_handover - Try to acquire via handover
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * The function must be called only when the context has higher priority
+ * than the current owner and the console is in an unsafe state.
+ * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
+ *
+ * The function sets "req_prio" field to make the current owner aware of
+ * the request. Then it waits until the current owner releases the console,
+ * or an even higher context takes over the request, or timeout expires.
+ *
+ * The current owner checks the "req_prio" field on exit from the unsafe
+ * region and releases the console. It does not touch the "req_prio" field
+ * so that the console stays reserved for the waiter.
+ *
+ * Return: 0 on success. Otherwise, an error code on failure. Also @cur
+ * is updated to the latest state when the modification failed.
+ *
+ * Errors:
+ *
+ * -EPERM: A panic is in progress and this is not the panic CPU.
+ * Or a higher priority context has taken over the
+ * console or the handover request.
+ *
+ * -EBUSY: The current owner is on the same CPU so that the
+ * handshake cannot work. Or the current owner is not
+ * willing to wait (zero timeout). Or the console does
+ * not enter the safe state before the timeout expires. The
+ * caller might still use the unsafe hostile takeover
+ * when allowed.
+ *
+ * -EAGAIN: @cur has changed when creating the handover request.
+ * The caller should retry with direct acquire.
+ */
+static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+ int timeout;
+ int request_err = -EBUSY;
+
+ /*
+ * Check that the handover is called when the direct acquire failed
+ * with -EBUSY.
+ */
+ WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+ WARN_ON_ONCE(!cur->unsafe);
+
+ /* Handover is not possible on the same CPU. */
+ if (cur->cpu == cpu)
+ return -EBUSY;
+
+ /*
+ * Console stays unsafe after an unsafe takeover until re-initialized.
+ * Waiting is not going to help in this case.
+ */
+ if (cur->unsafe_takeover)
+ return -EBUSY;
+
+ /* Is the caller willing to wait? */
+ if (ctxt->spinwait_max_us == 0)
+ return -EBUSY;
+
+ /*
+ * Setup a request for the handover. The caller should try to acquire
+ * the console directly when the current state has been modified.
+ */
+ new.atom = cur->atom;
+ new.req_prio = ctxt->prio;
+ if (!nbcon_state_try_cmpxchg(con, cur, &new))
+ return -EAGAIN;
+
+ cur->atom = new.atom;
+
+ /* Wait until there is no owner and then acquire the console. */
+ for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
+ /* On successful acquire, this request is cleared. */
+ request_err = nbcon_context_try_acquire_requested(ctxt, cur);
+ if (!request_err)
+ return 0;
+
+ /*
+ * If the acquire should be aborted, it must be ensured
+ * that the request is removed before returning to caller.
+ */
+ if (request_err == -EPERM)
+ break;
+
+ udelay(1);
+
+ /* Re-read the state because some time has passed. */
+ nbcon_state_read(con, cur);
+ }
+
+ /* Timed out or aborted. Carefully remove handover request. */
+ do {
+ /*
+ * No need to remove request if there is a new waiter. This
+ * can only happen if a higher priority context has taken over
+ * the console or the handover request.
+ */
+ if (!nbcon_waiter_matches(cur, ctxt->prio))
+ return -EPERM;
+
+ /* Unset request for handover. */
+ new.atom = cur->atom;
+ new.req_prio = NBCON_PRIO_NONE;
+ if (nbcon_state_try_cmpxchg(con, cur, &new)) {
+ /*
+ * Request successfully unset. Report failure of
+ * acquiring via handover.
+ */
+ cur->atom = new.atom;
+ return request_err;
+ }
+
+ /*
+ * Unable to remove request. Try to acquire in case
+ * the owner has released the lock.
+ */
+ } while (nbcon_context_try_acquire_requested(ctxt, cur));
+
+ /* Lucky timing. The acquire succeeded while removing the request. */
+ return 0;
+}
+
+/**
+ * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
+ * @ctxt: The context of the caller
+ * @cur: The current console state
+ *
+ * Acquire the console even in the unsafe state.
+ *
+ * It is permitted only when the 'allow_unsafe_takeover' field is set,
+ * which is done only by the final attempt to flush messages in panic().
+ *
+ * Return: 0 on success. -EPERM when not allowed by the context.
+ */
+static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
+ struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state new;
+
+ if (!ctxt->allow_unsafe_takeover)
+ return -EPERM;
+
+ /* Ensure caller is allowed to perform unsafe hostile takeovers. */
+ if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
+ return -EPERM;
+
+ /*
+ * Check that try_acquire_direct() and try_acquire_handover() returned
+ * -EBUSY in the right situation.
+ */
+ WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+ WARN_ON_ONCE(cur->unsafe != true);
+
+ do {
+ new.atom = cur->atom;
+ new.cpu = cpu;
+ new.prio = ctxt->prio;
+ new.unsafe |= cur->unsafe_takeover;
+ new.unsafe_takeover |= cur->unsafe;
+
+ } while (!nbcon_state_try_cmpxchg(con, cur, &new));
+
+ return 0;
+}
+
+static struct printk_buffers panic_nbcon_pbufs;
+
+/**
+ * nbcon_context_try_acquire - Try to acquire nbcon console
+ * @ctxt: The context of the caller
+ *
+ * Context: Any context that cannot be migrated to another CPU.
+ * Return: True if the console was acquired. False otherwise.
+ *
+ * If the caller allowed an unsafe hostile takeover, on success the
+ * caller should check the current console state to see if it is
+ * in an unsafe state. Otherwise, on success the caller may assume
+ * the console is not in an unsafe state.
+ */
+static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ int err;
+
+ nbcon_state_read(con, &cur);
+try_again:
+ err = nbcon_context_try_acquire_direct(ctxt, &cur);
+ if (err != -EBUSY)
+ goto out;
+
+ err = nbcon_context_try_acquire_handover(ctxt, &cur);
+ if (err == -EAGAIN)
+ goto try_again;
+ if (err != -EBUSY)
+ goto out;
+
+ err = nbcon_context_try_acquire_hostile(ctxt, &cur);
+out:
+ if (err)
+ return false;
+
+ /* Acquire succeeded. */
+
+ /* Assign the appropriate buffer for this context. */
+ if (atomic_read(&panic_cpu) == cpu)
+ ctxt->pbufs = &panic_nbcon_pbufs;
+ else
+ ctxt->pbufs = con->pbufs;
+
+ /* Set the record sequence for this context to print. */
+ ctxt->seq = nbcon_seq_read(ctxt->console);
+
+ return true;
+}
+
+static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
+ int expected_prio)
+{
+ /*
+ * Since consoles can only be acquired by higher priorities,
+ * owning contexts are uniquely identified by @prio. However,
+ * since contexts can unexpectedly lose ownership, it is
+ * possible that later another owner appears with the same
+ * priority. For this reason @cpu is also needed.
+ */
+
+ if (cur->prio != expected_prio)
+ return false;
+
+ if (cur->cpu != expected_cpu)
+ return false;
+
+ return true;
+}
+
+/**
+ * nbcon_context_release - Release the console
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ */
+static void nbcon_context_release(struct nbcon_context *ctxt)
+{
+ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ struct nbcon_state new;
+
+ nbcon_state_read(con, &cur);
+
+ do {
+ if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
+ break;
+
+ new.atom = cur.atom;
+ new.prio = NBCON_PRIO_NONE;
+
+ /*
+ * If @unsafe_takeover is set, it is kept set so that
+ * the state remains permanently unsafe.
+ */
+ new.unsafe |= cur.unsafe_takeover;
+
+ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+
+ ctxt->pbufs = NULL;
+}
+
+/**
+ * nbcon_context_can_proceed - Check whether ownership can proceed
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ * @cur: The current console state
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * Must be invoked when entering the unsafe state to make sure that it still
+ * owns the lock. Also must be invoked when exiting the unsafe context
+ * to eventually free the lock for a higher priority context which asked
+ * for the friendly handover.
+ *
+ * It can be called inside an unsafe section when the console is only
+ * temporarily in a safe state, instead of exiting and re-entering the
+ * unsafe state.
+ *
+ * Also it can be called in the safe context before doing an expensive
+ * safe operation. It does not make sense to do the operation when
+ * a higher priority context took the lock.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Make sure this context still owns the console. */
+ if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
+ return false;
+
+ /* The console owner can proceed if there is no waiter. */
+ if (cur->req_prio == NBCON_PRIO_NONE)
+ return true;
+
+ /*
+ * A console owner within an unsafe region is always allowed to
+ * proceed, even if there are waiters. It can perform a handover
+ * when exiting the unsafe region. Otherwise the waiter will
+ * need to perform an unsafe hostile takeover.
+ */
+ if (cur->unsafe)
+ return true;
+
+ /* Waiters always have higher priorities than owners. */
+ WARN_ON_ONCE(cur->req_prio <= cur->prio);
+
+ /*
+ * Having a safe point for takeover and eventually a few
+ * duplicated characters or a full line is way better than a
+ * hostile takeover. Post processing can take care of the garbage.
+ * Release and hand over.
+ */
+ nbcon_context_release(ctxt);
+
+ /*
+ * It is not clear whether the waiter really took over ownership. The
+ * outermost callsite must make the final decision whether console
+ * ownership is needed for it to proceed. If yes, it must reacquire
+ * ownership (possibly hostile) before carefully proceeding.
+ *
+ * The calling context no longer owns the console so go back all the
+ * way instead of trying to implement reacquire heuristics in tons of
+ * places.
+ */
+ return false;
+}
+
+/**
+ * nbcon_can_proceed - Check whether ownership can proceed
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * It is used in nbcon_enter_unsafe() to make sure that it still owns the
+ * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
+ * for a higher priority context which asked for the friendly handover.
+ *
+ * It can be called inside an unsafe section when the console is only
+ * temporarily in a safe state, instead of exiting and re-entering the
+ * unsafe state.
+ *
+ * Also it can be called in the safe context before doing an expensive safe
+ * operation. It does not make sense to do the operation when a higher
+ * priority context took the lock.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+
+ nbcon_state_read(con, &cur);
+
+ return nbcon_context_can_proceed(ctxt, &cur);
+}
+EXPORT_SYMBOL_GPL(nbcon_can_proceed);
+
+#define nbcon_context_enter_unsafe(c) __nbcon_context_update_unsafe(c, true)
+#define nbcon_context_exit_unsafe(c) __nbcon_context_update_unsafe(c, false)
+
+/**
+ * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ * @unsafe: The new value for the unsafe bit
+ *
+ * Return: True if the unsafe state was updated and this context still
+ * owns the console. Otherwise false if ownership was handed
+ * over or taken.
+ *
+ * This function allows console owners to modify the unsafe status of the
+ * console.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ *
+ * Internal helper to avoid duplicated code.
+ */
+static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
+{
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ struct nbcon_state new;
+
+ nbcon_state_read(con, &cur);
+
+ do {
+ /*
+ * The unsafe bit must not be cleared if an
+ * unsafe hostile takeover has occurred.
+ */
+ if (!unsafe && cur.unsafe_takeover)
+ goto out;
+
+ if (!nbcon_context_can_proceed(ctxt, &cur))
+ return false;
+
+ new.atom = cur.atom;
+ new.unsafe = unsafe;
+ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+
+ cur.atom = new.atom;
+out:
+ return nbcon_context_can_proceed(ctxt, &cur);
+}
+
+/**
+ * nbcon_enter_unsafe - Enter an unsafe region in the driver
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ return nbcon_context_enter_unsafe(ctxt);
+}
+EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
+
+/**
+ * nbcon_exit_unsafe - Exit an unsafe region in the driver
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ return nbcon_context_exit_unsafe(ctxt);
+}
+EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+
+/**
+ * nbcon_reacquire - Reacquire a console after losing ownership
+ * @wctxt: The write context that was handed to the write function
+ *
+ * Since ownership can be lost at any time due to handover or takeover, a
+ * printing context _should_ be prepared to back out immediately and
+ * carefully. However, there are many scenarios where the context _must_
+ * reacquire ownership in order to finalize or revert hardware changes.
+ *
+ * This function allows a context to reacquire ownership using the same
+ * priority as its previous ownership.
+ *
+ * Note that for printing contexts, after a successful reacquire the
+ * context will have no output buffer because that has been lost. This
+ * function cannot be used to resume printing.
+ */
+void nbcon_reacquire(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+
+ while (!nbcon_context_try_acquire(ctxt))
+ cpu_relax();
+
+ wctxt->outbuf = NULL;
+ wctxt->len = 0;
+ nbcon_state_read(con, &cur);
+ wctxt->unsafe_takeover = cur.unsafe_takeover;
+}
+EXPORT_SYMBOL_GPL(nbcon_reacquire);
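A hedged sketch of the finalize/revert scenario described above (the device helpers and driver are hypothetical; nbcon_reacquire() and the unsafe brackets are from this file):

	/* Hedged sketch: revert a hardware change even after losing ownership. */
	static bool foo_write_masked(struct console *con,
				     struct nbcon_write_context *wctxt)
	{
		unsigned int saved = foo_uart_mask_irqs(con);	/* hypothetical */
		bool done = false;

		if (nbcon_enter_unsafe(wctxt)) {
			foo_uart_emit(con, wctxt->outbuf, wctxt->len);
			done = nbcon_exit_unsafe(wctxt);
		}

		if (!done) {
			/*
			 * Ownership was lost. Reacquire at the previous
			 * priority solely to restore the device state;
			 * printing cannot resume (the output buffer is gone).
			 */
			nbcon_reacquire(wctxt);
		}

		foo_uart_unmask_irqs(con, saved);	/* hypothetical */
		return done;
	}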
+
+/**
+ * nbcon_emit_next_record - Emit a record in the acquired context
+ * @wctxt: The write context that will be handed to the write function
+ * @use_atomic: True if the write_atomic callback is to be used
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context. If the caller
+ * wants to do more it must reacquire the console first.
+ *
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer.
+ */
+static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
+ struct printk_message pmsg = {
+ .pbufs = ctxt->pbufs,
+ };
+ unsigned long con_dropped;
+ struct nbcon_state cur;
+ unsigned long dropped;
+ bool done = false;
+
+ /*
+ * The printk buffers are filled within an unsafe section. This
+ * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
+ * clobbering each other.
+ */
+
+ if (!nbcon_context_enter_unsafe(ctxt))
+ return false;
+
+ ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
+ if (!ctxt->backlog)
+ return nbcon_context_exit_unsafe(ctxt);
+
+ /*
+ * @con->dropped is not protected in case of an unsafe hostile
+ * takeover. In that situation the update can be racy so
+ * annotate it accordingly.
+ */
+ con_dropped = data_race(READ_ONCE(con->dropped));
+
+ dropped = con_dropped + pmsg.dropped;
+ if (dropped && !is_extended)
+ console_prepend_dropped(&pmsg, dropped);
+
+ if (!nbcon_context_exit_unsafe(ctxt))
+ return false;
+
+ /* For skipped records just update seq/dropped in @con. */
+ if (pmsg.outbuf_len == 0)
+ goto update_con;
+
+ /* Initialize the write context for driver callbacks. */
+ wctxt->outbuf = &pmsg.pbufs->outbuf[0];
+ wctxt->len = pmsg.outbuf_len;
+ nbcon_state_read(con, &cur);
+ wctxt->unsafe_takeover = cur.unsafe_takeover;
+
+ if (use_atomic &&
+ con->write_atomic) {
+ done = con->write_atomic(con, wctxt);
+
+ } else if (!use_atomic &&
+ con->write_thread &&
+ con->kthread) {
+ WARN_ON_ONCE(con->kthread != current);
+ done = con->write_thread(con, wctxt);
+ }
+
+ if (!done) {
+ /*
+ * The emit was aborted, probably due to a loss of ownership.
+ * Ensure ownership was lost or released before reporting the
+ * loss.
+ */
+ nbcon_context_release(ctxt);
+ return false;
+ }
+
+ /*
+ * Since any dropped message was successfully output, reset the
+ * dropped count for the console.
+ */
+ dropped = 0;
+update_con:
+ /*
+ * The dropped count and the sequence number are updated within an
+ * unsafe section. This limits update races to the panic context and
+ * allows the panic context to win.
+ */
+
+ if (!nbcon_context_enter_unsafe(ctxt))
+ return false;
+
+ if (dropped != con_dropped) {
+ /* Counterpart to the READ_ONCE() above. */
+ WRITE_ONCE(con->dropped, dropped);
+ }
+
+ nbcon_seq_try_update(ctxt, pmsg.seq + 1);
+
+ return nbcon_context_exit_unsafe(ctxt);
+}
+
+/**
+ * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
+ * @con: Console to operate on
+ * @ctxt: The acquire context that contains the state
+ * at console_acquire()
+ *
+ * Return: True if the thread should shutdown or if the console is
+ * allowed to print and a record is available. False otherwise.
+ *
+ * After the thread wakes up, it must first check if it should shutdown before
+ * attempting any printing.
+ */
+static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
+{
+ bool is_usable;
+ short flags;
+ int cookie;
+
+ if (kthread_should_stop())
+ return true;
+
+ cookie = console_srcu_read_lock();
+ flags = console_srcu_read_flags(con);
+ is_usable = console_is_usable(con, flags, false);
+ console_srcu_read_unlock(cookie);
+
+ if (!is_usable)
+ return false;
+
+ /* Bring the sequence in @ctxt up to date */
+ ctxt->seq = nbcon_seq_read(con);
+
+ return prb_read_valid(prb, ctxt->seq, NULL);
+}
+
+/**
+ * nbcon_kthread_func - The printer thread function
+ * @__console: Console to operate on
+ */
+static int nbcon_kthread_func(void *__console)
+{
+ struct console *con = __console;
+ struct nbcon_write_context wctxt = {
+ .ctxt.console = con,
+ .ctxt.prio = NBCON_PRIO_NORMAL,
+ };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+ unsigned long flags;
+ short con_flags;
+ bool backlog;
+ int cookie;
+ int ret;
+
+wait_for_event:
+ /*
+ * Guarantee this task is visible on the rcuwait before
+ * checking the wake condition.
+ *
+ * The full memory barrier within set_current_state() of
+ * ___rcuwait_wait_event() pairs with the full memory
+ * barrier within rcuwait_has_sleeper().
+ *
+ * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
+ */
+ ret = rcuwait_wait_event(&con->rcuwait,
+ nbcon_kthread_should_wakeup(con, ctxt),
+ TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
+
+ if (kthread_should_stop())
+ return 0;
+
+ /* Wait was interrupted by a spurious signal, go back to sleep. */
+ if (ret)
+ goto wait_for_event;
+
+ do {
+ backlog = false;
+
+ cookie = console_srcu_read_lock();
+
+ con_flags = console_srcu_read_flags(con);
+
+ if (console_is_usable(con, con_flags, false)) {
+ con->driver_enter(con, &flags);
+
+ /*
+ * Ensure this stays on the CPU to make handover and
+ * takeover possible.
+ */
+ cant_migrate();
+
+ if (nbcon_context_try_acquire(ctxt)) {
+ /*
+ * If the emit fails, this context is no
+ * longer the owner.
+ */
+ if (nbcon_emit_next_record(&wctxt, false)) {
+ nbcon_context_release(ctxt);
+ backlog = ctxt->backlog;
+ }
+ }
+
+ con->driver_exit(con, flags);
+ }
+
+ console_srcu_read_unlock(cookie);
+
+ } while (backlog);
+
+ goto wait_for_event;
+}
+
+/**
+ * nbcon_irq_work - irq work to wake printk thread
+ * @irq_work: The irq work to operate on
+ */
+static void nbcon_irq_work(struct irq_work *irq_work)
+{
+ struct console *con = container_of(irq_work, struct console, irq_work);
+
+ nbcon_kthread_wake(con);
+}
+
+static inline bool rcuwait_has_sleeper(struct rcuwait *w)
+{
+ bool has_sleeper;
+
+ rcu_read_lock();
+ /*
+ * Guarantee any new records can be seen by tasks preparing to wait
+ * before this context checks if the rcuwait is empty.
+ *
+ * This full memory barrier pairs with the full memory barrier within
+ * set_current_state() of ___rcuwait_wait_event(), which is called
+ * after prepare_to_rcuwait() adds the waiter but before it has
+ * checked the wait condition.
+ *
+ * This pairs with nbcon_kthread_func:A.
+ */
+ smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
+ has_sleeper = !!rcu_dereference(w->task);
+ rcu_read_unlock();
+
+ return has_sleeper;
+}
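The resulting ordering is the classic sleeper/waker pattern; an illustrative
sketch (not code from this patch):

	waker (printk path)             sleeper (printer thread)
	---------------------           --------------------------
	store record to ringbuffer      prepare_to_rcuwait(): add task
	smp_mb()  /* A above */         smp_mb() in set_current_state()
	read w->task                    re-check for new records

Either the waker observes the sleeping task (and queues the irq_work to wake
it), or the sleeper observes the new record when re-checking its wait
condition. A lost wakeup would require both loads to miss, which the paired
barriers rule out.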
+
+/**
+ * nbcon_wake_threads - Wake up printing threads using irq_work
+ */
+void nbcon_wake_threads(void)
+{
+ struct console *con;
+ int cookie;
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ /*
+ * Only schedule irq_work if the printing thread is
+ * actively waiting. If not waiting, the thread will
+ * notice by itself that it has work to do.
+ */
+ if (con->kthread && rcuwait_has_sleeper(&con->rcuwait))
+ irq_work_queue(&con->irq_work);
+ }
+ console_srcu_read_unlock(cookie);
+}
+
+/* Track the nbcon emergency nesting per CPU. */
+static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
+static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
+
+/**
+ * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
+ *
+ * Return: Either a pointer to the per CPU emergency nesting counter of
+ * the current CPU or to the init data during early boot.
+ */
+static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
+{
+ /*
+ * The value of __printk_percpu_data_ready gets set in normal
+ * context and before SMP initialization. As a result it can
+ * never change while inside an nbcon emergency section.
+ */
+ if (!printk_percpu_data_ready())
+ return &early_nbcon_pcpu_emergency_nesting;
+
+ return this_cpu_ptr(&nbcon_pcpu_emergency_nesting);
+}
+
+/**
+ * nbcon_atomic_emit_one - Print one record for an nbcon console using the
+ * write_atomic() callback
+ * @wctxt: An initialized write context struct to use
+ * for this context
+ *
+ * Return: False if the given console could not print a record or there
+ * are no more records to print, otherwise true.
+ *
+ * This is an internal helper to handle the locking of the console before
+ * calling nbcon_emit_next_record().
+ */
+static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ if (!nbcon_context_try_acquire(ctxt))
+ return false;
+
+ /*
+ * nbcon_emit_next_record() returns false when the console was
+ * handed over or taken over. In both cases the context is no
+ * longer valid.
+ */
+ if (!nbcon_emit_next_record(wctxt, true))
+ return false;
+
+ nbcon_context_release(ctxt);
+
+ return ctxt->backlog;
+}
+
+/**
+ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
+ * printing on the current CPU
+ *
+ * Context: Any context which could not be migrated to another CPU.
+ * Return: The nbcon_prio to use for acquiring an nbcon console in this
+ * context for printing.
+ */
+enum nbcon_prio nbcon_get_default_prio(void)
+{
+ unsigned int *cpu_emergency_nesting;
+
+ if (this_cpu_in_panic())
+ return NBCON_PRIO_PANIC;
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+ if (*cpu_emergency_nesting)
+ return NBCON_PRIO_EMERGENCY;
+
+ return NBCON_PRIO_NORMAL;
+}
+
+/**
+ * nbcon_atomic_emit_next_record - Print one record for an nbcon console
+ * using the write_atomic() callback
+ * @con: The console to print on
+ * @handover: Will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding
+ * both the console_lock and the SRCU read lock. Otherwise it
+ * is set to false.
+ * @cookie: The cookie from the SRCU read lock.
+ *
+ * Context: Any context which could not be migrated to another CPU.
+ * Return: True if a record could be printed, otherwise false.
+ *
+ * This function is meant to be called by console_flush_all() to print records
+ * on nbcon consoles using the write_atomic() callback. Essentially it is the
+ * nbcon version of console_emit_next_record().
+ */
+bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie)
+{
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+ unsigned long driver_flags;
+ bool progress = false;
+ unsigned long flags;
+
+ *handover = false;
+
+ /* Use the same locking order as console_emit_next_record(). */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
+ stop_critical_timings();
+ }
+
+ con->driver_enter(con, &driver_flags);
+ cant_migrate();
+
+ ctxt->console = con;
+ ctxt->prio = nbcon_get_default_prio();
+
+ progress = nbcon_atomic_emit_one(&wctxt);
+
+ con->driver_exit(con, driver_flags);
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ start_critical_timings();
+ *handover = console_lock_spinning_disable_and_check(cookie);
+ printk_safe_exit_irqrestore(flags);
+ }
+
+ return progress;
+}
+
+/**
+ * __nbcon_atomic_flush_all - Flush all nbcon consoles using their
+ * write_atomic() callback
+ * @stop_seq: Flush up until this record
+ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ */
+static void __nbcon_atomic_flush_all(u64 stop_seq, bool allow_unsafe_takeover)
+{
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+ struct console *con;
+ bool any_progress;
+ int cookie;
+
+ do {
+ any_progress = false;
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
+ unsigned long irq_flags;
+
+ if (!(flags & CON_NBCON))
+ continue;
+
+ if (!console_is_usable(con, flags, true))
+ continue;
+
+ if (nbcon_seq_read(con) >= stop_seq)
+ continue;
+
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->console = con;
+ ctxt->spinwait_max_us = 2000;
+ ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
+
+ /*
+ * Atomic flushing does not use console driver
+ * synchronization (i.e. it does not hold the port
+ * lock for uart consoles). Therefore IRQs must be
+ * disabled to avoid being interrupted and then
+ * calling into a driver that will deadlock trying to
+ * acquire console ownership.
+ *
+ * This also disables migration in order to get the
+ * current CPU priority.
+ */
+ local_irq_save(irq_flags);
+
+ ctxt->prio = nbcon_get_default_prio();
+
+ any_progress |= nbcon_atomic_emit_one(&wctxt);
+
+ local_irq_restore(irq_flags);
+ }
+ console_srcu_read_unlock(cookie);
+ } while (any_progress);
+}
+
+/**
+ * nbcon_atomic_flush_all - Flush all nbcon consoles using their
+ * write_atomic() callback
+ *
+ * Flush the backlog up through the currently newest record. Any new
+ * records added while flushing will not be flushed. This is to avoid
+ * one CPU printing without bound while other CPUs continue to add records.
+ */
+void nbcon_atomic_flush_all(void)
+{
+ __nbcon_atomic_flush_all(prb_next_reserve_seq(prb), false);
+}
+
+/**
+ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
+ * write_atomic() callback and allowing unsafe hostile takeovers
+ *
+ * Flush the backlog up through the currently newest record. Unsafe hostile
+ * takeovers will be performed, if necessary.
+ */
+void nbcon_atomic_flush_unsafe(void)
+{
+ __nbcon_atomic_flush_all(prb_next_reserve_seq(prb), true);
+}
+
+/**
+ * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
+ * messages for that CPU are only stored
+ *
+ * Upon exiting the emergency section, all stored messages are flushed.
+ *
+ * Context: Any context. Disables preemption.
+ *
+ * When within an emergency section, no printing occurs on that CPU. This
+ * is to allow all emergency messages to be dumped into the ringbuffer before
+ * flushing the ringbuffer. The actual printing occurs when exiting the
+ * outermost emergency section.
+ */
+void nbcon_cpu_emergency_enter(void)
+{
+ unsigned int *cpu_emergency_nesting;
+
+ preempt_disable();
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+ (*cpu_emergency_nesting)++;
+}
+
+/**
+ * nbcon_cpu_emergency_exit - Exit an emergency section and flush the
+ * stored messages
+ *
+ * Flushing only occurs when exiting all nesting for the CPU.
+ *
+ * Context: Any context. Enables preemption.
+ */
+void nbcon_cpu_emergency_exit(void)
+{
+ unsigned int *cpu_emergency_nesting;
+ bool do_trigger_flush = false;
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+
+ WARN_ON_ONCE(*cpu_emergency_nesting == 0);
+
+ if (*cpu_emergency_nesting == 1)
+ do_trigger_flush = true;
+
+ /* Undo the nesting count of nbcon_cpu_emergency_enter(). */
+ (*cpu_emergency_nesting)--;
+
+ preempt_enable();
+
+ if (do_trigger_flush)
+ printk_trigger_flush();
+}
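A usage sketch (an assumed caller, not part of this hunk): an emergency
section brackets the code emitting a block of related messages, e.g. a
backtrace, so the whole block lands in the ringbuffer before any flushing
begins:

	nbcon_cpu_emergency_enter();
	/* Everything printed here is only stored, not yet emitted. */
	pr_warn("unexpected hardware state\n");
	dump_stack();
	nbcon_cpu_emergency_exit();	/* outermost exit triggers the flush */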
+
+/**
+ * nbcon_kthread_stop - Stop a printer thread
+ * @con: Console to operate on
+ */
+static void nbcon_kthread_stop(struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+
+ if (!con->kthread)
+ return;
+
+ kthread_stop(con->kthread);
+ con->kthread = NULL;
+}
+
+/**
+ * nbcon_kthread_create - Create a printer thread
+ * @con: Console to operate on
+ *
+ * If it fails, let the console proceed. The atomic part might
+ * be usable and useful.
+ */
+void nbcon_kthread_create(struct console *con)
+{
+ struct task_struct *kt;
+
+ lockdep_assert_console_list_lock_held();
+
+ if (!(con->flags & CON_NBCON) || !con->write_thread)
+ return;
+
+ if (!printk_threads_enabled || con->kthread)
+ return;
+
+ /*
+ * Printer threads cannot be started as long as any boot console is
+ * registered because there is no way to synchronize the hardware
+ * registers between boot console code and regular console code.
+ */
+ if (have_boot_console)
+ return;
+
+ kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
+ if (IS_ERR(kt)) {
+ con_printk(KERN_ERR, con, "failed to start printing thread\n");
+ return;
+ }
+
+ con->kthread = kt;
+
+ /*
+ * It is important that console printing threads are scheduled
+ * shortly after a printk call and with generous runtime budgets.
+ */
+ sched_set_normal(con->kthread, -20);
+}
+
+static int __init printk_setup_threads(void)
+{
+ struct console *con;
+
+ console_list_lock();
+ printk_threads_enabled = true;
+ for_each_console(con)
+ nbcon_kthread_create(con);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && printing_via_unlock)
+ nbcon_legacy_kthread_create();
+ console_list_unlock();
+ return 0;
+}
+early_initcall(printk_setup_threads);
+
+/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+ * Return: True on success. False otherwise and the console cannot
+ * be used.
+ *
+ * This is not part of nbcon_init() because buffer allocation must
+ * be performed earlier in the console registration process.
+ */
+bool nbcon_alloc(struct console *con)
+{
+ if (con->flags & CON_BOOT) {
+ /*
+ * Boot console printing is synchronized with legacy console
+ * printing, so boot consoles can share the same global printk
+ * buffers.
+ */
+ con->pbufs = &printk_shared_pbufs;
+ } else {
+ con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
+ if (!con->pbufs) {
+ con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * nbcon_init - Initialize the nbcon console specific data
+ * @con: Console to initialize
+ *
+ * nbcon_alloc() *must* be called and succeed before this function
+ * is called.
+ *
+ * This function expects that the legacy @con->seq has been set.
+ */
+void nbcon_init(struct console *con)
+{
+ struct nbcon_state state = { };
+
+ /* nbcon_alloc() must have been called and successful! */
+ BUG_ON(!con->pbufs);
+
+ rcuwait_init(&con->rcuwait);
+ init_irq_work(&con->irq_work, nbcon_irq_work);
+ nbcon_seq_force(con, con->seq);
+ nbcon_state_set(con, &state);
+ nbcon_kthread_create(con);
+}
+
+/**
+ * nbcon_free - Free and cleanup the nbcon console specific data
+ * @con: Console to free/cleanup nbcon data
+ */
+void nbcon_free(struct console *con)
+{
+ struct nbcon_state state = { };
+
+ nbcon_kthread_stop(con);
+ nbcon_state_set(con, &state);
+
+ /* Boot consoles share global printk buffers. */
+ if (!(con->flags & CON_BOOT))
+ kfree(con->pbufs);
+
+ con->pbufs = NULL;
+}
+
+static inline bool uart_is_nbcon(struct uart_port *up)
+{
+ int cookie;
+ bool ret;
+
+ if (!uart_console(up))
+ return false;
+
+ cookie = console_srcu_read_lock();
+ ret = (console_srcu_read_flags(up->cons) & CON_NBCON);
+ console_srcu_read_unlock(cookie);
+ return ret;
+}
+
+/**
+ * nbcon_acquire - The second half of the port locking wrapper
+ * @up: The uart port whose @lock was locked
+ *
+ * The uart_port_lock() wrappers will first lock the spin_lock @up->lock.
+ * Then this function is called to implement nbcon-specific processing.
+ *
+ * If @up is an nbcon console, this console will be acquired and marked as
+ * unsafe. Otherwise this function does nothing.
+ *
+ * nbcon consoles acquired via the port lock wrapper always use priority
+ * NBCON_PRIO_NORMAL.
+ */
+void nbcon_acquire(struct uart_port *up)
+{
+ struct console *con = up->cons;
+ struct nbcon_context ctxt;
+
+ if (!uart_is_nbcon(up))
+ return;
+
+ WARN_ON_ONCE(con->locked_port);
+
+ do {
+ do {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.console = con;
+ ctxt.prio = NBCON_PRIO_NORMAL;
+ } while (!nbcon_context_try_acquire(&ctxt));
+
+ } while (!nbcon_context_enter_unsafe(&ctxt));
+
+ con->locked_port = true;
+}
+EXPORT_SYMBOL_GPL(nbcon_acquire);
+
+/**
+ * nbcon_release - The first half of the port unlocking wrapper
+ * @up: The uart port whose @lock is about to be unlocked
+ *
+ * The uart_port_unlock() wrappers will first call this function to implement
+ * nbcon-specific processing. Then afterwards the uart_port_unlock() wrappers
+ * will unlock the spin_lock @up->lock.
+ *
+ * If @up is an nbcon console, the console will be marked as safe and
+ * released. Otherwise this function does nothing.
+ *
+ * nbcon consoles acquired via the port lock wrapper always use priority
+ * NBCON_PRIO_NORMAL.
+ */
+void nbcon_release(struct uart_port *up)
+{
+ struct console *con = up->cons;
+ struct nbcon_context ctxt = {
+ .console = con,
+ .prio = NBCON_PRIO_NORMAL,
+ };
+
+ if (!con->locked_port)
+ return;
+
+ if (nbcon_context_exit_unsafe(&ctxt))
+ nbcon_context_release(&ctxt);
+
+ con->locked_port = false;
+}
+EXPORT_SYMBOL_GPL(nbcon_release);
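For reference, the port lock wrappers mentioned in both kernel-docs pair these
calls with @up->lock roughly as follows (a simplified sketch; the real
wrappers also provide the irqsave/irqrestore variants):

static inline void uart_port_lock(struct uart_port *up)
{
	spin_lock(&up->lock);
	nbcon_acquire(up);	/* second half of the locking wrapper */
}

static inline void uart_port_unlock(struct uart_port *up)
{
	nbcon_release(up);	/* first half of the unlocking wrapper */
	spin_unlock(&up->lock);
}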
+
+/**
+ * printk_kthread_shutdown - shutdown all threaded printers
+ *
+ * On system shutdown all threaded printers are stopped. This allows printk
+ * to transition back to atomic printing, thus providing a robust mechanism
+ * for the final shutdown/reboot messages to be output.
+ */
+static void printk_kthread_shutdown(void)
+{
+ struct console *con;
+
+ console_list_lock();
+ for_each_console(con) {
+ if (con->flags & CON_NBCON)
+ nbcon_kthread_stop(con);
+ }
+ console_list_unlock();
+}
+
+static struct syscore_ops printk_syscore_ops = {
+ .shutdown = printk_kthread_shutdown,
+};
+
+static int __init printk_init_ops(void)
+{
+ register_syscore_ops(&printk_syscore_ops);
+ return 0;
+}
+device_initcall(printk_init_ops);
*/
int __read_mostly suppress_printk;
-/*
- * During panic, heavy printk by other CPUs can delay the
- * panic and risk deadlock on console resources.
- */
-static int __read_mostly suppress_panic_printk;
-
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
.name = "console_lock"
* Return: A cookie to pass to console_srcu_read_unlock().
*/
int console_srcu_read_lock(void)
+ __acquires(&console_srcu)
{
return srcu_read_lock_nmisafe(&console_srcu);
}
* Counterpart to console_srcu_read_lock()
*/
void console_srcu_read_unlock(int cookie)
+ __releases(&console_srcu)
{
srcu_read_unlock_nmisafe(&console_srcu, cookie);
}
return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}
+/* Return true if a panic is in progress on the current CPU. */
+bool this_cpu_in_panic(void)
+{
+ /*
+ * We can use raw_smp_processor_id() here because it is impossible for
+ * the task to be migrated to the panic_cpu, or away from it. If
+ * panic_cpu has already been set, and we're not currently executing on
+ * that CPU, then we never will be.
+ */
+ return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
+}
+
+/*
+ * Return true if a panic is in progress on a remote CPU.
+ *
+ * On true, the local CPU should immediately release any printing resources
+ * that may be needed by the panic CPU.
+ */
+bool other_cpu_in_panic(void)
+{
+ return (panic_in_progress() && !this_cpu_in_panic());
+}
+
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);
+/*
+ * Specifies if a legacy console is registered. If legacy consoles are
+ * present, it is necessary to perform the console_lock/console_unlock dance
+ * whenever console flushing should occur.
+ */
+bool have_legacy_console;
+
+/*
+ * Specifies if an nbcon console is registered. If nbcon consoles are present,
+ * synchronous printing of legacy consoles will not occur during panic until
+ * the backtrace has been stored to the ringbuffer.
+ */
+bool have_nbcon_console;
+
+/*
+ * Specifies if a boot console is registered. If boot consoles are present,
+ * nbcon consoles cannot print simultaneously and must be synchronized by
+ * the console lock. This is because boot consoles and nbcon consoles may
+ * have mapped the same hardware.
+ */
+bool have_boot_console;
+
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
+
+static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
+
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static struct printk_ringbuffer printk_rb_dynamic;
-static struct printk_ringbuffer *prb = &printk_rb_static;
+struct printk_ringbuffer *prb = &printk_rb_static;
/*
* We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
return len;
}
-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
- bool is_extended, bool may_supress);
-
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
atomic64_t seq;
* there may be a waiter spinning (like a spinlock). Also it must be
* ready to hand over the lock at the end of the section.
*/
-static void console_lock_spinning_enable(void)
+void console_lock_spinning_enable(void)
{
+ /*
+ * Do not use spinning in panic(). The panic CPU wants to keep the lock.
+ * Non-panic CPUs abandon the flush anyway.
+ *
+ * Just keep the lockdep annotation. The panic-CPU should avoid
+ * taking console_owner_lock because it might cause a deadlock.
+ * This looks like the easiest way to prevent false lockdep
+ * reports without handling the races in a lockless way.
+ */
+ if (panic_in_progress())
+ goto lockdep;
+
raw_spin_lock(&console_owner_lock);
console_owner = current;
raw_spin_unlock(&console_owner_lock);
+lockdep:
/* The waiter may spin on us after setting console_owner */
spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
}
*
* Return: 1 if the lock rights were passed, 0 otherwise.
*/
-static int console_lock_spinning_disable_and_check(int cookie)
+int console_lock_spinning_disable_and_check(int cookie)
{
int waiter;
+ /*
+ * Ignore spinning waiters during panic() because they might get stopped
+ * or blocked at any time.
+ *
+ * It is safe because nobody is allowed to start spinning during panic
+ * in the first place. If there has been a waiter then non-panic CPUs
+ * might stay spinning. They would get stopped anyway. The panic context
+ * will never start spinning and an interrupted spin on panic CPU will
+ * never continue.
+ */
+ if (panic_in_progress()) {
+ /* Keep lockdep happy. */
+ spin_release(&console_owner_dep_map, _THIS_IP_);
+ return 0;
+ }
+
raw_spin_lock(&console_owner_lock);
waiter = READ_ONCE(console_waiter);
console_owner = NULL;
return ret;
}
+static bool legacy_allow_panic_sync;
+
+/*
+ * This acts as a one-way switch to allow legacy consoles to print from
+ * the printk() caller context on a panic CPU.
+ */
+void printk_legacy_allow_panic_sync(void)
+{
+ legacy_allow_panic_sync = true;
+}
+
asmlinkage int vprintk_emit(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
+ bool do_trylock_unlock = printing_via_unlock &&
+ !IS_ENABLED(CONFIG_PREEMPT_RT);
int printed_len;
- bool in_sched = false;
/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
return 0;
- if (unlikely(suppress_panic_printk) &&
- atomic_read(&panic_cpu) != raw_smp_processor_id())
+ /*
+ * The messages on the panic CPU are the most important. If
+ * non-panic CPUs are generating any messages, they will be
+ * silently dropped.
+ */
+ if (other_cpu_in_panic())
return 0;
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
- in_sched = true;
+ /* If called from the scheduler, we can not call up(). */
+ do_trylock_unlock = false;
}
printk_delay(level);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
- /* If called from the scheduler, we can not call up(). */
- if (!in_sched) {
+ if (!have_boot_console && have_nbcon_console) {
+ bool is_panic_context = this_cpu_in_panic();
+
+ /*
+ * In panic, the legacy consoles are not allowed to print from
+ * the printk calling context unless explicitly allowed. This
+ * gives the safe nbcon consoles a chance to print out all the
+ * panic messages first. This restriction only applies if
+ * there are nbcon consoles registered.
+ */
+ if (is_panic_context)
+ do_trylock_unlock &= legacy_allow_panic_sync;
+
+ /*
+ * There are situations where nbcon atomic printing should
+ * happen in the printk() caller context:
+ *
+ * - When this CPU is in panic.
+ *
+ * - When booting, before the printing threads have been
+ * started.
+ *
+ * - During shutdown, since the printing threads may not get
+ * a chance to print the final messages.
+ *
+ * Note that if boot consoles are registered, the
+ * console_lock/console_unlock dance must be relied upon
+ * instead because nbcon consoles cannot print simultaneously
+ * with boot consoles.
+ */
+ if (is_panic_context ||
+ !printk_threads_enabled ||
+ (system_state > SYSTEM_RUNNING)) {
+ nbcon_atomic_flush_all();
+ }
+ }
+
+ nbcon_wake_threads();
+
+ if (do_trylock_unlock) {
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during
* printing of all remaining records to all consoles so that
* this context can return as soon as possible. Hopefully
* another printk() caller will take over the printing.
+ *
+ * Also, nbcon_get_default_prio() requires migration disabled.
*/
preempt_disable();
+
/*
- * Try to acquire and then immediately release the console
- * semaphore. The release will print out buffers. With the
- * spinning variant, this context tries to take over the
- * printing from another printing context.
+ * Do not emit for EMERGENCY priority. The console will be
+ * explicitly flushed when exiting the emergency section.
*/
- if (console_trylock_spinning())
- console_unlock();
+ if (nbcon_get_default_prio() == NBCON_PRIO_EMERGENCY) {
+ do_trylock_unlock = false;
+ } else {
+ /*
+ * Try to acquire and then immediately release the
+ * console semaphore. The release will print out
+ * buffers. With the spinning variant, this context
+ * tries to take over the printing from another
+ * printing context.
+ */
+ if (console_trylock_spinning())
+ console_unlock();
+ }
+
preempt_enable();
}
- if (in_sched)
- defer_console_output();
- else
+ if (do_trylock_unlock)
wake_up_klogd();
+ else
+ defer_console_output();
return printed_len;
}
static bool pr_flush(int timeout_ms, bool reset_on_progress);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
+static struct task_struct *nbcon_legacy_kthread;
+
+static inline void wake_up_legacy_kthread(void)
+{
+ if (nbcon_legacy_kthread)
+ wake_up_interruptible(&legacy_wait);
+}
+
#else /* CONFIG_PRINTK */
#define printk_time false
static u64 syslog_seq;
-static size_t record_print_text(const struct printk_record *r,
- bool syslog, bool time)
-{
- return 0;
-}
-static ssize_t info_print_ext_header(char *buf, size_t size,
- struct printk_info *info)
-{
- return 0;
-}
-static ssize_t msg_print_ext_body(char *buf, size_t size,
- char *text, size_t text_len,
- struct dev_printk_info *dev_info) { return 0; }
-static void console_lock_spinning_enable(void) { }
-static int console_lock_spinning_disable_and_check(int cookie) { return 0; }
-static bool suppress_message_printing(int level) { return false; }
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+static inline void nbcon_legacy_kthread_create(void) { }
+static inline void wake_up_legacy_kthread(void) { }
#endif /* CONFIG_PRINTK */
#ifdef CONFIG_EARLY_PRINTK
void resume_console(void)
{
struct console *con;
+ short flags;
+ int cookie;
if (!console_suspend_enabled)
return;
*/
synchronize_srcu(&console_srcu);
+ /*
+ * Since this runs in task context, wake the threaded printers
+ * directly rather than scheduling irq_work to do it.
+ */
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ flags = console_srcu_read_flags(con);
+ if (flags & CON_NBCON)
+ nbcon_kthread_wake(con);
+ }
+ console_srcu_read_unlock(cookie);
+
+ wake_up_legacy_kthread();
+
pr_flush(1000, true);
}
*/
static int console_cpu_notify(unsigned int cpu)
{
- if (!cpuhp_tasks_frozen) {
+ if (!cpuhp_tasks_frozen && printing_via_unlock &&
+ !IS_ENABLED(CONFIG_PREEMPT_RT)) {
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
return 0;
}
-/*
- * Return true if a panic is in progress on a remote CPU.
- *
- * On true, the local CPU should immediately release any printing resources
- * that may be needed by the panic CPU.
- */
-bool other_cpu_in_panic(void)
-{
- if (!panic_in_progress())
- return false;
-
- /*
- * We can use raw_smp_processor_id() here because it is impossible for
- * the task to be migrated to the panic_cpu, or away from it. If
- * panic_cpu has already been set, and we're not currently executing on
- * that CPU, then we never will be.
- */
- return atomic_read(&panic_cpu) != raw_smp_processor_id();
-}
-
/**
* console_lock - block the console subsystem from printing
*
}
EXPORT_SYMBOL(is_console_locked);
-/*
- * Check if the given console is currently capable and allowed to print
- * records.
- *
- * Requires the console_srcu_read_lock.
- */
-static inline bool console_is_usable(struct console *con)
-{
- short flags = console_srcu_read_flags(con);
-
- if (!(flags & CON_ENABLED))
- return false;
-
- if ((flags & CON_SUSPENDED))
- return false;
-
- if (!con->write)
- return false;
-
- /*
- * Console drivers may assume that per-cpu resources have been
- * allocated. So unless they're explicitly marked as being able to
- * cope (CON_ANYTIME) don't call them until this CPU is officially up.
- */
- if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
- return false;
-
- return true;
-}
-
static void __console_unlock(void)
{
console_locked = 0;
up_console_sem();
}
+static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
+
+#ifdef CONFIG_PRINTK
+
/*
* Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
* is achieved by shifting the existing message over and inserting the dropped
*
* If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
*/
-#ifdef CONFIG_PRINTK
-static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
{
struct printk_buffers *pbufs = pmsg->pbufs;
const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
memcpy(outbuf, scratchbuf, len);
pmsg->outbuf_len += len;
}
-#else
-#define console_prepend_dropped(pmsg, dropped)
-#endif /* CONFIG_PRINTK */
/*
* Read and format the specified record (or a later record if the specified
* of @pmsg are valid. (See the documentation of struct printk_message
* for information about the @pmsg fields.)
*/
-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
- bool is_extended, bool may_suppress)
+bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ bool is_extended, bool may_suppress)
{
- static int panic_console_dropped;
-
struct printk_buffers *pbufs = pmsg->pbufs;
const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
const size_t outbuf_sz = sizeof(pbufs->outbuf);
pmsg->seq = r.info->seq;
pmsg->dropped = r.info->seq - seq;
- /*
- * Check for dropped messages in panic here so that printk
- * suppression can occur as early as possible if necessary.
- */
- if (pmsg->dropped &&
- panic_in_progress() &&
- panic_console_dropped++ > 10) {
- suppress_panic_printk = 1;
- pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
- }
-
/* Skip record that has level above the console loglevel. */
if (may_suppress && suppress_message_printing(r.info->level))
goto out;
}
/*
+ * Used as the printk buffers for non-panic, serialized console printing.
+ * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
+ * Its usage requires the console_lock held.
+ */
+struct printk_buffers printk_shared_pbufs;
+
+/*
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
*
*/
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
{
- static struct printk_buffers pbufs;
-
bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
- char *outbuf = &pbufs.outbuf[0];
+ char *outbuf = &printk_shared_pbufs.outbuf[0];
struct printk_message pmsg = {
- .pbufs = &pbufs,
+ .pbufs = &printk_shared_pbufs,
};
unsigned long flags;
con->dropped = 0;
}
- /*
- * While actively printing out messages, if another printk()
- * were to occur on another CPU, it may wait for this one to
- * finish. This task can not be preempted if there is a
- * waiter waiting to take over.
- *
- * Interrupts are disabled because the hand over to a waiter
- * must not be interrupted until the hand over is completed
- * (@console_waiter is cleared).
- */
- printk_safe_enter_irqsave(flags);
- console_lock_spinning_enable();
+ /* Write everything out to the hardware. */
- /* Do not trace print latency. */
- stop_critical_timings();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * On PREEMPT_RT this function is either in a thread or
+ * panic context. So there is no need for concern about
+ * printk reentrance, handovers, or lockdep complaints.
+ */
- /* Write everything out to the hardware. */
- con->write(con, outbuf, pmsg.outbuf_len);
+ con->write(con, outbuf, pmsg.outbuf_len);
+ con->seq = pmsg.seq + 1;
+ } else {
+ /*
+ * While actively printing out messages, if another printk()
+ * were to occur on another CPU, it may wait for this one to
+ * finish. This task can not be preempted if there is a
+ * waiter waiting to take over.
+ *
+ * Interrupts are disabled because the hand over to a waiter
+ * must not be interrupted until the hand over is completed
+ * (@console_waiter is cleared).
+ */
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
- start_critical_timings();
+ /* Do not trace print latency. */
+ stop_critical_timings();
- con->seq = pmsg.seq + 1;
+ lock_map_acquire_try(&printk_legacy_map);
+ con->write(con, outbuf, pmsg.outbuf_len);
+ lock_map_release(&printk_legacy_map);
- *handover = console_lock_spinning_disable_and_check(cookie);
- printk_safe_exit_irqrestore(flags);
+ start_critical_timings();
+
+ con->seq = pmsg.seq + 1;
+
+ *handover = console_lock_spinning_disable_and_check(cookie);
+ printk_safe_exit_irqrestore(flags);
+ }
skip:
return true;
}
+#else
+
+static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
+{
+ *handover = false;
+ return false;
+}
+
+#endif /* CONFIG_PRINTK */
+
/*
* Print out all remaining records to all consoles.
*
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
+ u64 printk_seq;
bool progress;
- if (!console_is_usable(con))
+ /*
+ * console_flush_all() is only for legacy consoles,
+ * unless the nbcon console has no kthread printer.
+ */
+ if ((flags & CON_NBCON) && con->kthread)
+ continue;
+
+ if (!console_is_usable(con, flags, true))
continue;
any_usable = true;
- progress = console_emit_next_record(con, handover, cookie);
+ if (flags & CON_NBCON) {
+
+ lock_map_acquire_try(&printk_legacy_map);
+ progress = nbcon_atomic_emit_next_record(con, handover, cookie);
+ lock_map_release(&printk_legacy_map);
+
+ printk_seq = nbcon_seq_read(con);
+ } else {
+ progress = console_emit_next_record(con, handover, cookie);
+
+ printk_seq = con->seq;
+ }
/*
* If a handover has occurred, the SRCU read lock
return false;
/* Track the next of the highest seq flushed. */
- if (con->seq > *next_seq)
- *next_seq = con->seq;
+ if (printk_seq > *next_seq)
+ *next_seq = printk_seq;
if (!progress)
continue;
return false;
}
-/**
- * console_unlock - unblock the console subsystem from printing
- *
- * Releases the console_lock which the caller holds to block printing of
- * the console subsystem.
- *
- * While the console_lock was held, console output may have been buffered
- * by printk(). If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
- *
- * console_unlock(); may be called from any context.
- */
-void console_unlock(void)
+static void console_flush_and_unlock(void)
{
bool do_cond_resched;
bool handover;
*/
} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
+
+/**
+ * console_unlock - unblock the console subsystem from printing
+ *
+ * Releases the console_lock which the caller holds to block printing of
+ * the console subsystem.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
+ *
+ * console_unlock(); may be called from any context.
+ */
+void console_unlock(void)
+{
+ /*
+ * PREEMPT_RT relies on kthread and atomic consoles for printing.
+ * It never attempts to print from console_unlock().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ __console_unlock();
+ return;
+ }
+
+ console_flush_and_unlock();
+}
EXPORT_SYMBOL(console_unlock);
/**
if (mode == CONSOLE_REPLAY_ALL) {
struct console *c;
+ short flags;
int cookie;
u64 seq;
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
- /*
- * This is an unsynchronized assignment, but the
- * kernel is in "hope and pray" mode anyway.
- */
- c->seq = seq;
+ flags = console_srcu_read_flags(c);
+
+ if (flags & CON_NBCON) {
+ nbcon_seq_force(c, seq);
+ } else {
+ /*
+ * This is an unsynchronized assignment. In
+ * panic, legacy consoles are only best effort.
+ */
+ c->seq = seq;
+ }
}
console_srcu_read_unlock(cookie);
}
- console_flush_all(false, &next_seq, &handover);
+ nbcon_atomic_flush_all();
+
+ if (printing_via_unlock)
+ console_flush_all(false, &next_seq, &handover);
}
/*
void console_start(struct console *console)
{
+ short flags;
+
console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED);
+ flags = console->flags;
console_list_unlock();
+
+ /*
+ * Ensure that all SRCU list walks have completed. The related
+ * printing context must be able to see it is enabled so that
+ * it is guaranteed to wake up and resume printing.
+ */
+ synchronize_srcu(&console_srcu);
+
+ if (flags & CON_NBCON)
+ nbcon_kthread_wake(console);
+ else
+ wake_up_legacy_kthread();
+
__pr_flush(console, 1000, true);
}
EXPORT_SYMBOL(console_start);
+#ifdef CONFIG_PRINTK
+static bool printer_should_wake(void)
+{
+ bool available = false;
+ struct console *con;
+ int cookie;
+
+ if (kthread_should_stop())
+ return true;
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
+ u64 printk_seq;
+
+ /*
+ * The legacy printer thread is only for legacy consoles,
+ * unless the nbcon console has no kthread printer.
+ */
+ if ((flags & CON_NBCON) && con->kthread)
+ continue;
+
+ if (!console_is_usable(con, flags, true))
+ continue;
+
+ if (flags & CON_NBCON) {
+ printk_seq = nbcon_seq_read(con);
+ } else {
+ /*
+ * It is safe to read @seq because only this
+ * thread context updates @seq.
+ */
+ printk_seq = con->seq;
+ }
+
+ if (prb_read_valid(prb, printk_seq, NULL)) {
+ available = true;
+ break;
+ }
+ }
+ console_srcu_read_unlock(cookie);
+
+ return available;
+}
+
+static int nbcon_legacy_kthread_func(void *unused)
+{
+ int error;
+
+ for (;;) {
+ error = wait_event_interruptible(legacy_wait, printer_should_wake());
+
+ if (kthread_should_stop())
+ break;
+
+ if (error)
+ continue;
+
+ console_lock();
+ console_flush_and_unlock();
+ }
+
+ return 0;
+}
+
+void nbcon_legacy_kthread_create(void)
+{
+ struct task_struct *kt;
+
+ lockdep_assert_held(&console_mutex);
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ return;
+
+ if (!printk_threads_enabled || nbcon_legacy_kthread)
+ return;
+
+ kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy");
+ if (IS_ERR(kt)) {
+ pr_err("unable to start legacy printing thread\n");
+ return;
+ }
+
+ nbcon_legacy_kthread = kt;
+
+ /*
+ * It is important that console printing threads are scheduled
+ * shortly after a printk call and with generous runtime budgets.
+ */
+ sched_set_normal(nbcon_legacy_kthread, -20);
+}
+#endif /* CONFIG_PRINTK */
+
static int __read_mostly keep_bootcon;
static int __init keep_bootcon_setup(char *str)
newcon->flags |= CON_CONSDEV;
}
-#define con_printk(lvl, con, fmt, ...) \
- printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
- (con->flags & CON_BOOT) ? "boot" : "", \
- con->name, con->index, ##__VA_ARGS__)
-
static void console_init_seq(struct console *newcon, bool bootcon_registered)
{
struct console *con;
newcon->seq = prb_next_seq(prb);
for_each_console(con) {
- if ((con->flags & CON_BOOT) &&
- (con->flags & CON_ENABLED) &&
- con->seq < newcon->seq) {
- newcon->seq = con->seq;
+ u64 seq;
+
+ if (!((con->flags & CON_BOOT) &&
+ (con->flags & CON_ENABLED))) {
+ continue;
}
+
+ if (con->flags & CON_NBCON)
+ seq = nbcon_seq_read(con);
+ else
+ seq = con->seq;
+
+ if (seq < newcon->seq)
+ newcon->seq = seq;
}
}
goto unlock;
}
+ if (newcon->flags & CON_NBCON) {
+ /*
+ * Ensure the nbcon console buffers can be allocated
+ * before modifying any global data.
+ */
+ if (!nbcon_alloc(newcon))
+ goto unlock;
+ }
+
/*
* See if we want to enable this console driver by default.
*
err = try_enable_preferred_console(newcon, false);
/* printk() messages are not printed to the Braille console. */
- if (err || newcon->flags & CON_BRL)
+ if (err || newcon->flags & CON_BRL) {
+ if (newcon->flags & CON_NBCON)
+ nbcon_free(newcon);
goto unlock;
+ }
/*
* If we have a bootconsole, and are switching to a real console,
newcon->dropped = 0;
console_init_seq(newcon, bootcon_registered);
+ if (newcon->flags & CON_NBCON) {
+ have_nbcon_console = true;
+ nbcon_init(newcon);
+ } else {
+ have_legacy_console = true;
+ nbcon_legacy_kthread_create();
+ }
+
+ if (newcon->flags & CON_BOOT)
+ have_boot_console = true;
+
/*
* Put this console in the list - keep the
* preferred driver at the head of the list.
/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
+ bool is_boot_con = (console->flags & CON_BOOT);
+ bool found_legacy_con = false;
+ bool found_nbcon_con = false;
+ bool found_boot_con = false;
+ struct console *c;
int res;
lockdep_assert_console_list_lock_held();
*/
synchronize_srcu(&console_srcu);
+ if (console->flags & CON_NBCON)
+ nbcon_free(console);
+
console_sysfs_notify();
if (console->exit)
res = console->exit(console);
+ /*
+ * With this console gone, the global flags tracking registered
+ * console types may have changed. Update them.
+ */
+ for_each_console(c) {
+ if (c->flags & CON_BOOT)
+ found_boot_con = true;
+
+ if (c->flags & CON_NBCON)
+ found_nbcon_con = true;
+ else
+ found_legacy_con = true;
+ }
+ if (!found_boot_con)
+ have_boot_console = false;
+ if (!found_legacy_con)
+ have_legacy_console = false;
+ if (!found_nbcon_con)
+ have_nbcon_console = false;
+
+ /*
+ * When the last boot console unregisters, start up the
+ * printing threads.
+ */
+ if (is_boot_con && !have_boot_console) {
+ for_each_console(c)
+ nbcon_kthread_create(c);
+ }
+
+#ifdef CONFIG_PRINTK
+ if (!printing_via_unlock && nbcon_legacy_kthread) {
+ kthread_stop(nbcon_legacy_kthread);
+ nbcon_legacy_kthread = NULL;
+ }
+#endif
+
return res;
}
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
- int remaining = timeout_ms;
+ unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
+ unsigned long remaining_jiffies = timeout_jiffies;
struct console *c;
u64 last_diff = 0;
u64 printk_seq;
+ short flags;
+ bool locked;
int cookie;
u64 diff;
u64 seq;
might_sleep();
- seq = prb_next_seq(prb);
+ seq = prb_next_reserve_seq(prb);
- /* Flush the consoles so that records up to @seq are printed. */
- console_lock();
- console_unlock();
+ /*
+ * Flush the consoles so that records up to @seq are printed.
+ * Otherwise this function will just wait for the threaded printers
+ * to print up to @seq.
+ */
+ if (printing_via_unlock && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ console_lock();
+ console_unlock();
+ }
for (;;) {
+ unsigned long begin_jiffies;
+ unsigned long slept_jiffies;
+
+ locked = false;
diff = 0;
- /*
- * Hold the console_lock to guarantee safe access to
- * console->seq. Releasing console_lock flushes more
- * records in case @seq is still not printed on all
- * usable consoles.
- */
- console_lock();
+ if (printing_via_unlock) {
+ /*
+ * Hold the console_lock to guarantee safe access to
+ * console->seq. Releasing console_lock flushes more
+ * records in case @seq is still not printed on all
+ * usable consoles.
+ */
+ console_lock();
+ locked = true;
+ }
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
if (con && con != c)
continue;
+
+ flags = console_srcu_read_flags(c);
+
/*
* If consoles are not usable, it cannot be expected
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
- if (!console_is_usable(c))
+ if (!console_is_usable(c, flags, true) &&
+ !console_is_usable(c, flags, false)) {
continue;
- printk_seq = c->seq;
+ }
+
+ if (flags & CON_NBCON) {
+ printk_seq = nbcon_seq_read(c);
+ } else {
+ WARN_ON_ONCE(!locked);
+ printk_seq = c->seq;
+ }
+
if (printk_seq < seq)
diff += seq - printk_seq;
}
console_srcu_read_unlock(cookie);
if (diff != last_diff && reset_on_progress)
- remaining = timeout_ms;
+ remaining_jiffies = timeout_jiffies;
- console_unlock();
+ if (locked)
+ console_unlock();
/* Note: @diff is 0 if there are no usable consoles. */
- if (diff == 0 || remaining == 0)
+ if (diff == 0 || remaining_jiffies == 0)
break;
- if (remaining < 0) {
- /* no timeout limit */
- msleep(100);
- } else if (remaining < 100) {
- msleep(remaining);
- remaining = 0;
- } else {
- msleep(100);
- remaining -= 100;
- }
+ /* msleep(1) might sleep much longer. Check the elapsed time in jiffies. */
+ begin_jiffies = jiffies;
+ msleep(1);
+ slept_jiffies = jiffies - begin_jiffies;
+
+ remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
last_diff = diff;
}
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
- /* If trylock fails, someone else is doing the printing */
- if (console_trylock())
- console_unlock();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ wake_up_interruptible(&legacy_wait);
+ } else {
+ /*
+ * If trylock fails, some other context
+ * will do the printing.
+ */
+ if (console_trylock())
+ console_unlock();
+ }
}
if (pending & PRINTK_PENDING_WAKEUP)
* New messages may have been added directly to the ringbuffer
* using vprintk_store(), so wake any waiters as well.
*/
- __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
+ int val = PRINTK_PENDING_WAKEUP;
+
+ if (printing_via_unlock)
+ val |= PRINTK_PENDING_OUTPUT;
+ __wake_up_klogd(val);
}
void printk_trigger_flush(void)
{
+ nbcon_wake_threads();
defer_console_output();
}
#include <linux/errno.h>
#include <linux/bug.h>
#include "printk_ringbuffer.h"
+#include "internal.h"
/**
* DOC: printk_ringbuffer overview
*
* desc_push_tail:B / desc_reserve:D
* set descriptor reusable (state), then push descriptor tail (id)
+ *
+ * desc_update_last_finalized:A / desc_last_finalized_seq:A
+ * store finalized record, then set new highest finalized sequence number
*/
#define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
unsigned long next_lpos;
if (size == 0) {
- /* Specify a data-less block. */
- blk_lpos->begin = NO_LPOS;
- blk_lpos->next = NO_LPOS;
+ /*
+ * Data blocks are not created for empty lines. Instead, the
+ * reader will recognize these special lpos values and handle
+ * them appropriately.
+ */
+ blk_lpos->begin = EMPTY_LINE_LPOS;
+ blk_lpos->next = EMPTY_LINE_LPOS;
return NULL;
}
/* Data-less data block description. */
if (BLK_DATALESS(blk_lpos)) {
- if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
+ /*
+ * Records that are just empty lines are also valid, even
+ * though they do not have a data block. For such records
+ * explicitly return empty string data to signify success.
+ */
+ if (blk_lpos->begin == EMPTY_LINE_LPOS &&
+ blk_lpos->next == EMPTY_LINE_LPOS) {
*data_size = 0;
return "";
}
+
+ /* Data lost, invalid, or otherwise unavailable. */
return NULL;
}
}
/*
+ * @last_finalized_seq value guarantees that all records up to and including
+ * this sequence number are finalized and can be read. The only exception are
+ * too old records which have already been overwritten.
+ *
+ * It is also guaranteed that @last_finalized_seq only increases.
+ *
+ * Be aware that finalized records following non-finalized records are not
+ * reported because they are not yet available to the reader. For example,
+ * a new record stored via printk() will not be available to a printer if
+ * it follows a record that has not been finalized yet. However, once that
+ * non-finalized record becomes finalized, @last_finalized_seq will be
+ * appropriately updated and the full set of finalized records will be
+ * available to the printer. And since each printk() caller will either
+ * directly print or trigger deferred printing of all available unprinted
+ * records, all printk() messages will get printed.
+ */
+static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb)
+{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ unsigned long ulseq;
+
+ /*
+ * Guarantee the sequence number is loaded before loading the
+ * associated record in order to guarantee that the record can be
+ * seen by this CPU. This pairs with desc_update_last_finalized:A.
+ */
+ ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq
+ ); /* LMM(desc_last_finalized_seq:A) */
+
+ return __ulseq_to_u64seq(rb, ulseq);
+}
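To illustrate the guarantee (a reader-side sketch using the public prb API;
consume() is a placeholder and rb is a struct printk_ringbuffer *): a consumer
that walks records in order never observes a gap below @last_finalized_seq:

	struct printk_info info;
	struct printk_record r;
	char text[1024];
	u64 seq = 0;

	prb_rec_init_rd(&r, &info, text, sizeof(text));
	while (prb_read_valid(rb, seq, &r)) {
		consume(&r);			/* placeholder for real handling */
		seq = r.info->seq + 1;		/* no finalized record is skipped */
	}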
+
+static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
+ struct printk_record *r, unsigned int *line_count);
+
+/*
+ * Check if there are records directly following @last_finalized_seq that are
+ * finalized. If so, update @last_finalized_seq to the latest of these
+ * records. It is not allowed to skip over records that are not yet finalized.
+ */
+static void desc_update_last_finalized(struct printk_ringbuffer *rb)
+{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ u64 old_seq = desc_last_finalized_seq(rb);
+ unsigned long oldval;
+ unsigned long newval;
+ u64 finalized_seq;
+ u64 try_seq;
+
+try_again:
+ finalized_seq = old_seq;
+ try_seq = finalized_seq + 1;
+
+ /* Try to find later finalized records. */
+ while (_prb_read_valid(rb, &try_seq, NULL, NULL)) {
+ finalized_seq = try_seq;
+ try_seq++;
+ }
+
+ /* No update needed if no later finalized record was found. */
+ if (finalized_seq == old_seq)
+ return;
+
+ oldval = __u64seq_to_ulseq(old_seq);
+ newval = __u64seq_to_ulseq(finalized_seq);
+
+ /*
+ * Set the sequence number of a later finalized record that has been
+ * seen.
+ *
+ * Guarantee the record data is visible to other CPUs before storing
+ * its sequence number. This pairs with desc_last_finalized_seq:A.
+ *
+ * Memory barrier involvement:
+ *
+ * If desc_last_finalized_seq:A reads from
+ * desc_update_last_finalized:A, then desc_read:A reads from
+ * _prb_commit:B.
+ *
+ * Relies on:
+ *
+ * RELEASE from _prb_commit:B to desc_update_last_finalized:A
+ * matching
+ * ACQUIRE from desc_last_finalized_seq:A to desc_read:A
+ *
+ * Note: _prb_commit:B and desc_update_last_finalized:A can be
+ * different CPUs. However, the desc_update_last_finalized:A
+ * CPU (which performs the release) must have previously seen
+ * _prb_commit:B.
+ */
+ if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq,
+ &oldval, newval)) { /* LMM(desc_update_last_finalized:A) */
+ old_seq = __ulseq_to_u64seq(rb, oldval);
+ goto try_again;
+ }
+}
+
+/*
* Attempt to finalize a specified descriptor. If this fails, the descriptor
* is either already final or it will finalize itself when the writer commits.
*/
-static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
+static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id)
{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
unsigned long prev_state_val = DESC_SV(id, desc_committed);
struct prb_desc *d = to_desc(desc_ring, id);
- atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
- DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
-
- /* Best effort to remember the last finalized @id. */
- atomic_long_set(&desc_ring->last_finalized_id, id);
+ if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val,
+ DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */
+ desc_update_last_finalized(rb);
+ }
}
/**
* readers. (For seq==0 there is no previous descriptor.)
*/
if (info->seq > 0)
- desc_make_final(desc_ring, DESC_ID(id - 1));
+ desc_make_final(rb, DESC_ID(id - 1));
r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
/* If text data allocation fails, a data-less record is committed. */
*/
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
if (head_id != e->id)
- desc_make_final(desc_ring, e->id);
+ desc_make_final(e->rb, e->id);
}
/**
*/
void prb_final_commit(struct prb_reserved_entry *e)
{
- struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
-
_prb_commit(e, desc_finalized);
- /* Best effort to remember the last finalized @id. */
- atomic_long_set(&desc_ring->last_finalized_id, e->id);
+ desc_update_last_finalized(e->rb);
}
/*
* descriptor. However, it also verifies that the record is finalized and has
* the sequence number @seq. On success, 0 is returned.
*
+ * For the panic CPU, committed descriptors are also considered finalized.
+ *
* Error return values:
* -EINVAL: A finalized record with sequence number @seq does not exist.
* -ENOENT: A finalized record with sequence number @seq exists, but its data
/*
* An unexpected @id (desc_miss) or @seq mismatch means the record
- * does not exist. A descriptor in the reserved or committed state
- * means the record does not yet exist for the reader.
+ * does not exist. A descriptor in the reserved state means the
+ * record does not yet exist for the reader.
*/
if (d_state == desc_miss ||
d_state == desc_reserved ||
- d_state == desc_committed ||
s != seq) {
return -EINVAL;
}
/*
+ * A descriptor in the committed state means the record does not yet
+ * exist for the reader. However, for the panic CPU, committed
+ * records are also handled as finalized records since they contain
+ * message data in a consistent state and may contain additional
+ * hints as to the cause of the panic.
+ */
+ if (d_state == desc_committed && !this_cpu_in_panic())
+ return -EINVAL;
+
+ /*
* A descriptor in the reusable state may no longer have its data
* available; report it as existing but with lost data. Or the record
* may actually be a record with lost data.
}
/* Get the sequence number of the tail descriptor. */
-static u64 prb_first_seq(struct printk_ringbuffer *rb)
+u64 prb_first_seq(struct printk_ringbuffer *rb)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
enum desc_state d_state;
return seq;
}
+/**
+ * prb_next_reserve_seq() - Get the sequence number after the most recently
+ * reserved record.
+ *
+ * @rb: The ringbuffer to get the sequence number from.
+ *
+ * This is the public function available to readers to see what sequence
+ * number will be assigned to the next reserved record.
+ *
+ * Note that depending on the situation, this value can be equal to or
+ * higher than the sequence number returned by prb_next_seq().
+ *
+ * Context: Any context.
+ * Return: The sequence number that will be assigned to the next record
+ * reserved.
+ */
+u64 prb_next_reserve_seq(struct printk_ringbuffer *rb)
+{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ unsigned long last_finalized_id;
+ atomic_long_t *state_var;
+ u64 last_finalized_seq;
+ unsigned long head_id;
+ struct prb_desc desc;
+ unsigned long diff;
+ struct prb_desc *d;
+ int err;
+
+ /*
+ * It may not be possible to read a sequence number for @head_id.
+ * So the ID of @last_finalized_seq is used to calculate what the
+ * sequence number of @head_id will be.
+ */
+
+try_again:
+ last_finalized_seq = desc_last_finalized_seq(rb);
+
+ /*
+ * @head_id is loaded after @last_finalized_seq to ensure that it is
+ * at or beyond @last_finalized_seq.
+ *
+ * Memory barrier involvement:
+ *
+ * If desc_last_finalized_seq:A reads from
+ * desc_update_last_finalized:A, then
+ * prb_next_reserve_seq:A reads from desc_reserve:D.
+ *
+ * Relies on:
+ *
+ * RELEASE from desc_reserve:D to desc_update_last_finalized:A
+ * matching
+ * ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A
+ *
+ * Note: desc_reserve:D and desc_update_last_finalized:A can be
+ * different CPUs. However, the desc_update_last_finalized:A CPU
+ * (which performs the release) must have previously seen
+ * desc_read:C, which implies desc_reserve:D can be seen.
+ */
+ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */
+
+ d = to_desc(desc_ring, last_finalized_seq);
+ state_var = &d->state_var;
+
+ /* Extract the ID, used to specify the descriptor to read. */
+ last_finalized_id = DESC_ID(atomic_long_read(state_var));
+
+ /* Ensure @last_finalized_id is correct. */
+ err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc);
+
+ if (err == -EINVAL) {
+ if (last_finalized_seq == 0) {
+ /*
+ * @last_finalized_seq still contains its initial
+ * value. Probably no record has been finalized yet.
+ * This means the ringbuffer is not yet full and the
+ * @head_id value can be used directly (subtracting
+ * off the id value corresponding to seq=0).
+ */
+
+ /*
+ * Because of hack#2 of the bootstrapping phase, the
+ * @head_id initial value must be handled separately.
+ */
+ if (head_id == DESC0_ID(desc_ring->count_bits))
+ return 0;
+
+ /*
+ * The @head_id is initialized such that the first
+ * increment will yield the first record (seq=0).
+ * Therefore use the initial value +1 as the base to
+ * subtract from @head_id.
+ */
+ last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1;
+ } else {
+ /* Record must have been overwritten. Try again. */
+ goto try_again;
+ }
+ }
+
+ /*
+ * @diff is the number of records beyond the last record available
+ * to readers.
+ */
+ diff = head_id - last_finalized_id;
+
+ /*
+ * @head_id points to the most recently reserved record, but this
+ * function returns the sequence number that will be assigned to the
+ * next (not yet reserved) record. Thus +1 is needed.
+ */
+ return (last_finalized_seq + diff + 1);
+}
+
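A worked example of the final computation above, with made-up values: suppose @last_finalized_seq reads 10 and its descriptor has id X. If @head_id is X + 3, then three further records (sequence numbers 11, 12 and 13) have been reserved, @diff is 3, and the function returns 10 + 3 + 1 = 14, the sequence number the next reservation will receive.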
/*
- * Non-blocking read of a record. Updates @seq to the last finalized record
- * (which may have no data available).
+ * Non-blocking read of a record.
*
- * See the description of prb_read_valid() and prb_read_valid_info()
- * for details.
+ * On success @seq is updated to the record that was read and (if provided)
+ * @r and @line_count will contain the read/calculated data.
+ *
+ * On failure @seq is updated to a record that is not yet available to the
+ * reader, but it will be the next record available to the reader.
+ *
+ * Note: When the current CPU is in panic, this function will skip over any
+ * non-existent/non-finalized records in order to allow the panic CPU
+ * to print any and all records that have been finalized.
*/
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
struct printk_record *r, unsigned int *line_count)
*seq = tail_seq;
} else if (err == -ENOENT) {
- /* Record exists, but no data available. Skip. */
+ /* Record exists, but the data was lost. Skip. */
(*seq)++;
} else {
- /* Non-existent/non-finalized record. Must stop. */
- return false;
+ /*
+ * Non-existent/non-finalized record. Must stop.
+ *
+ * In panic situations it cannot be expected that
+ * non-finalized records will become finalized. But
+ * there may be other finalized records beyond them
+ * that need to be printed for a panic situation. If
+ * this is the panic CPU, skip this
+ * non-existent/non-finalized record unless it is
+ * at or beyond the head, in which case it is not
+ * possible to continue.
+ *
+ * Note that new messages printed on the panic CPU
+ * are finalized by the time we get here. The only
+ * exception might be the last message without a
+ * trailing newline, but it would have the sequence
+ * number returned by "prb_next_reserve_seq() - 1".
+ */
+ if (this_cpu_in_panic() && ((*seq + 1) < prb_next_reserve_seq(rb)))
+ (*seq)++;
+ else
+ return false;
}
}
* On success, the reader must check r->info.seq to see which record was
* actually read. This allows the reader to detect dropped records.
*
- * Failure means @seq refers to a not yet written record.
+ * Failure means @seq refers to a record not yet available to the reader.
*/
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
struct printk_record *r)
* On success, the reader must check info->seq to see which record meta data
* was actually read. This allows the reader to detect dropped records.
*
- * Failure means @seq refers to a not yet written record.
+ * Failure means @seq refers to a record not yet available to the reader.
*/
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
struct printk_info *info, unsigned int *line_count)
* newest sequence number available to readers will be.
*
* This provides readers a sequence number to jump to if all currently
- * available records should be skipped.
+ * available records should be skipped. It is guaranteed that all records
+ * previous to the returned value have been finalized and are (or were)
+ * available to the reader.
*
* Context: Any context.
* Return: The sequence number of the next newest (not yet available) record
*/
u64 prb_next_seq(struct printk_ringbuffer *rb)
{
- struct prb_desc_ring *desc_ring = &rb->desc_ring;
- enum desc_state d_state;
- unsigned long id;
u64 seq;
- /* Check if the cached @id still points to a valid @seq. */
- id = atomic_long_read(&desc_ring->last_finalized_id);
- d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
+ seq = desc_last_finalized_seq(rb);
- if (d_state == desc_finalized || d_state == desc_reusable) {
- /*
- * Begin searching after the last finalized record.
- *
- * On 0, the search must begin at 0 because of hack#2
- * of the bootstrapping phase it is not known if a
- * record at index 0 exists.
- */
- if (seq != 0)
- seq++;
- } else {
- /*
- * The information about the last finalized sequence number
- * has gone. It should happen only when there is a flood of
- * new messages and the ringbuffer is rapidly recycled.
- * Give up and start from the beginning.
- */
- seq = 0;
- }
+ /*
+ * Begin searching after the last finalized record.
+ *
+ * On 0, the search must begin at 0 because, due to
+ * hack#2 of the bootstrapping phase, it is not known
+ * if a record at index 0 exists.
+ */
+ if (seq != 0)
+ seq++;
/*
* The information about the last finalized @seq might be inaccurate.
rb->desc_ring.infos = infos;
atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
- atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));
+ atomic_long_set(&rb->desc_ring.last_finalized_seq, 0);
rb->text_data_ring.size_bits = textbits;
rb->text_data_ring.data = text_buf;
struct printk_info *infos;
atomic_long_t head_id;
atomic_long_t tail_id;
- atomic_long_t last_finalized_id;
+ atomic_long_t last_finalized_seq;
};
/*
#define DESC_SV(id, state) (((unsigned long)state << DESC_FLAGS_SHIFT) | id)
#define DESC_ID_MASK (~DESC_FLAGS_MASK)
#define DESC_ID(sv) ((sv) & DESC_ID_MASK)
+
+/*
+ * Special data block logical position values (for fields of
+ * @prb_desc.text_blk_lpos).
+ *
+ * - Bit0 is used to identify if the record has no data block. (Implemented in
+ * the LPOS_DATALESS() macro.)
+ *
+ * - Bit1 specifies the reason for not having a data block.
+ *
+ * These special values could never be real lpos values because of the
+ * meta data and alignment padding of data blocks. (See to_blk_size() for
+ * details.)
+ */
#define FAILED_LPOS 0x1
-#define NO_LPOS 0x3
+#define EMPTY_LINE_LPOS 0x3
#define FAILED_BLK_LPOS \
{ \
.infos = &_##name##_infos[0], \
.head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
.tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
- .last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+ .last_finalized_seq = ATOMIC_INIT(0), \
}, \
.text_data_ring = { \
.size_bits = (avgtextbits) + (descbits), \
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
struct printk_info *info, unsigned int *line_count);
+u64 prb_first_seq(struct printk_ringbuffer *rb);
u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
u64 prb_next_seq(struct printk_ringbuffer *rb);
+u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
+
+#ifdef CONFIG_64BIT
+
+#define __u64seq_to_ulseq(u64seq) (u64seq)
+#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
+
+#else /* CONFIG_64BIT */
+
+#define __u64seq_to_ulseq(u64seq) ((u32)u64seq)
+
+static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
+{
+ u64 rb_first_seq = prb_first_seq(rb);
+ u64 seq;
+
+ /*
+ * The provided sequence is only the lower 32 bits of the ringbuffer
+ * sequence. It needs to be expanded to 64bit. Get the first sequence
+ * number from the ringbuffer and fold it.
+ *
+ * Having a 32bit representation in the console is sufficient.
+ * If a console ever gets more than 2^31 records behind
+ * the ringbuffer then this is the least of the problems.
+ *
+ * Also the access to the ring buffer is always safe.
+ */
+ seq = rb_first_seq - (s32)((u32)rb_first_seq - ulseq);
+
+ return seq;
+}
+
+#endif /* CONFIG_64BIT */
#endif /* _KERNEL_PRINTK_RINGBUFFER_H */
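The folding trick in __ulseq_to_u64seq() is easiest to see with numbers. A stand-alone user-space sketch (the anchor value is made up; this is not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same fold as __ulseq_to_u64seq(): expand the 32bit tail of a sequence
 * number to 64bit, using the ringbuffer's first sequence as the anchor. */
static uint64_t ulseq_to_u64seq(uint64_t rb_first_seq, uint32_t ulseq)
{
	return rb_first_seq - (int32_t)((uint32_t)rb_first_seq - ulseq);
}

int main(void)
{
	uint64_t first = 0x100000005ULL;  /* anchor beyond 2^32; low 32 bits = 5 */

	/* 2 records behind the anchor: 3 expands to 0x100000003. */
	printf("%llx\n", (unsigned long long)ulseq_to_u64seq(first, 3));

	/* 7 records behind, crossing the 32bit wrap: 0xfffffffe -> 0xfffffffe. */
	printf("%llx\n", (unsigned long long)ulseq_to_u64seq(first, 0xfffffffe));
	return 0;
}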
this_cpu_dec(printk_context);
}
+void __printk_deferred_enter(void)
+{
+ cant_migrate();
+ this_cpu_inc(printk_context);
+}
+
+void __printk_deferred_exit(void)
+{
+ cant_migrate();
+ this_cpu_dec(printk_context);
+}
+
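Editor's note: these out-of-line variants back the printk_deferred_enter()/exit() pair. cant_migrate() asserts that the caller cannot migrate between the paired calls, which is what makes the per-CPU printk_context counter safe to modify without disabling preemption outright.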
asmlinkage int vprintk(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#ifdef CONFIG_PREEMPT_RT
+ t = per_cpu(timersd, cpu);
+ WARN_ON_ONCE(!t);
+ sp.sched_priority = 2;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#endif
}
/* Don't allow time recalculation while creating a new task. */
*/
#include <linux/kvm_para.h>
+#include <linux/console.h>
//////////////////////////////////////////////////////////////////////////////
//
if (rcu_stall_is_suppressed())
return;
+ nbcon_cpu_emergency_enter();
+
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.rst for info on how to debug
panic_on_rcu_stall();
rcu_force_quiescent_state(); /* Kick them all. */
+
+ nbcon_cpu_emergency_exit();
}
static void print_cpu_stall(unsigned long gps)
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
* this avoids any races wrt polling state changes and thereby avoids
* spurious IPIs.
*/
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
{
struct thread_info *ti = task_thread_info(p);
- return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+
+ return !(fetch_or(&ti->flags, 1 << tif_bit) & _TIF_POLLING_NRFLAG);
}
/*
for (;;) {
if (!(val & _TIF_POLLING_NRFLAG))
return false;
- if (val & _TIF_NEED_RESCHED)
+ if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
return true;
if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
break;
}
#else
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
{
- set_tsk_need_resched(p);
+ set_tsk_thread_flag(p, tif_bit);
return true;
}
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
-void resched_curr(struct rq *rq)
+static void __resched_curr(struct rq *rq, int lazy)
{
+ int cpu, tif_bit = TIF_NEED_RESCHED + lazy;
struct task_struct *curr = rq->curr;
- int cpu;
lockdep_assert_rq_held(rq);
- if (test_tsk_need_resched(curr))
+ if (unlikely(test_tsk_thread_flag(curr, tif_bit)))
return;
cpu = cpu_of(rq);
if (cpu == smp_processor_id()) {
- set_tsk_need_resched(curr);
- set_preempt_need_resched();
+ set_tsk_thread_flag(curr, tif_bit);
+ if (!lazy)
+ set_preempt_need_resched();
return;
}
- if (set_nr_and_not_polling(curr))
- smp_send_reschedule(cpu);
- else
+ if (set_nr_and_not_polling(curr, tif_bit)) {
+ if (!lazy)
+ smp_send_reschedule(cpu);
+ } else {
trace_sched_wake_idle_without_ipi(cpu);
+ }
+}
+
+void resched_curr(struct rq *rq)
+{
+ __resched_curr(rq, 0);
+}
+
+void resched_curr_lazy(struct rq *rq)
+{
+ int lazy = IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) && !sched_feat(FORCE_NEED_RESCHED) ?
+ TIF_NEED_RESCHED_LAZY_OFFSET : 0;
+
+ if (lazy && unlikely(test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED)))
+ return;
+
+ __resched_curr(rq, lazy);
}
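A note on the flag arithmetic above: __resched_curr() computes the thread flag as TIF_NEED_RESCHED + lazy. resched_curr() passes 0 and therefore sets the immediate TIF_NEED_RESCHED, while resched_curr_lazy() passes TIF_NEED_RESCHED_LAZY_OFFSET (defined elsewhere in this series as TIF_NEED_RESCHED_LAZY - TIF_NEED_RESCHED), so the sum selects the lazy flag. If the immediate flag is already set on the current task, a lazy request is redundant and is dropped early.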
void resched_cpu(int cpu)
if (cpu == smp_processor_id())
return;
- if (set_nr_and_not_polling(rq->idle))
+ if (set_nr_and_not_polling(rq->idle, TIF_NEED_RESCHED))
smp_send_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
static inline void sched_submit_work(struct task_struct *tsk)
{
+ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
unsigned int task_flags;
- if (task_is_running(tsk))
- return;
+ /*
+ * Establish LD_WAIT_CONFIG context to ensure none of the code called
+ * will use a blocking primitive -- which would lead to recursion.
+ */
+ lock_map_acquire_try(&sched_map);
task_flags = tsk->flags;
/*
* make sure to submit it to avoid deadlocks.
*/
blk_flush_plug(tsk->plug, true);
+
+ lock_map_release(&sched_map);
}
static void sched_update_worker(struct task_struct *tsk)
}
}
-asmlinkage __visible void __sched schedule(void)
+static __always_inline void __schedule_loop(unsigned int sched_mode)
{
- struct task_struct *tsk = current;
-
- sched_submit_work(tsk);
do {
preempt_disable();
- __schedule(SM_NONE);
+ __schedule(sched_mode);
sched_preempt_enable_no_resched();
} while (need_resched());
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+ struct task_struct *tsk = current;
+
+#ifdef CONFIG_RT_MUTEXES
+ lockdep_assert(!tsk->sched_rt_mutex);
+#endif
+
+ if (!task_is_running(tsk))
+ sched_submit_work(tsk);
+ __schedule_loop(SM_NONE);
sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_PREEMPT_RT
void __sched notrace schedule_rtlock(void)
{
- do {
- preempt_disable();
- __schedule(SM_RTLOCK_WAIT);
- sched_preempt_enable_no_resched();
- } while (need_resched());
+ __schedule_loop(SM_RTLOCK_WAIT);
}
NOKPROBE_SYMBOL(schedule_rtlock);
#endif
#ifdef CONFIG_RT_MUTEXES
+/*
+ * Would be more useful with typeof()/auto_type but they don't mix with
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
+ * name such that if someone were to implement this function we get to compare
+ * notes.
+ */
+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
+
+void rt_mutex_pre_schedule(void)
+{
+ lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
+ sched_submit_work(current);
+}
+
+void rt_mutex_schedule(void)
+{
+ lockdep_assert(current->sched_rt_mutex);
+ __schedule_loop(SM_NONE);
+}
+
+void rt_mutex_post_schedule(void)
+{
+ sched_update_worker(current);
+ lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
+}
+
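The three hooks above are meant to bracket the rtmutex slowpaths. A hedged sketch of the expected pairing (the caller below is hypothetical; the actual call sites live in the rtmutex code, not shown in this hunk):

/* Hypothetical lock slowpath, illustrating the required hook pairing. */
static void example_rtmutex_slowpath(void)
{
	/*
	 * Flush block-layer/worker state once, up front, and mark the
	 * task as being inside an rtmutex slowpath (sched_rt_mutex).
	 */
	rt_mutex_pre_schedule();

	/* ... enqueue as a waiter, then block until the lock is free ... */
	rt_mutex_schedule();	/* __schedule_loop() without re-submitting work */

	/* ... lock acquired ... */
	rt_mutex_post_schedule();	/* resume worker bookkeeping, clear the mark */
}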
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
{
if (pi_task)
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+/*
+ * task_is_pi_boosted - Check if task has been PI boosted.
+ * @p: Task to check.
+ *
+ * Return true if task is subject to priority inheritance.
+ */
+bool task_is_pi_boosted(const struct task_struct *p)
+{
+ int prio = p->prio;
+
+ if (!rt_prio(prio))
+ return false;
+ return prio != p->normal_prio;
+}
+
/**
* yield - yield the current processor to other threads.
*
.release = seq_release,
};
+static ssize_t sched_hog_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long end = jiffies + 60 * HZ;
+
+ while (time_before(jiffies, end) && !signal_pending(current))
+ cpu_relax();
+
+ return cnt;
+}
+
+static const struct file_operations sched_hog_fops = {
+ .write = sched_hog_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
static struct dentry *debugfs_sched;
static __init int sched_init_debug(void)
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
+ debugfs_create_file("hog", 0200, debugfs_sched, NULL, &sched_hog_fops);
+
return 0;
}
late_initcall(sched_init_debug);
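Usage note for the new debugfs knob, assuming debugfs is mounted at /sys/kernel/debug: any write, e.g. echo 1 > /sys/kernel/debug/sched/hog, busy-spins the writing CPU for up to 60 seconds or until a signal arrives, making it a convenient CPU hog for exercising the rescheduling changes in this series.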
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough.
*/
-static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se, bool tick)
{
+ struct rq *rq = rq_of(cfs_rq);
+
if ((s64)(se->vruntime - se->deadline) < 0)
return;
/*
* The task has consumed its request, reschedule.
*/
- if (cfs_rq->nr_running > 1) {
- resched_curr(rq_of(cfs_rq));
- clear_buddies(cfs_rq, se);
+ if (cfs_rq->nr_running < 2)
+ return;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) || sched_feat(FORCE_NEED_RESCHED)) {
+ resched_curr(rq);
+ } else {
+ /* Did the task ignore the lazy reschedule request? */
+ if (tick && test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY))
+ resched_curr(rq);
+ else
+ resched_curr_lazy(rq);
}
+ clear_buddies(cfs_rq, se);
}
#include "pelt.h"
/*
* Update the current task's runtime statistics.
*/
-static void update_curr(struct cfs_rq *cfs_rq)
+static void __update_curr(struct cfs_rq *cfs_rq, bool tick)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);
- update_deadline(cfs_rq, curr);
+ update_deadline(cfs_rq, curr, tick);
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
+static inline void update_curr(struct cfs_rq *cfs_rq)
+{
+ __update_curr(cfs_rq, false);
+}
+
static void update_curr_fair(struct rq *rq)
{
update_curr(cfs_rq_of(&rq->curr->se));
/*
* Update run-time statistics of the 'current'.
*/
- update_curr(cfs_rq);
+ __update_curr(cfs_rq, true);
/*
* Ensure that runnable average is periodically updated.
* validating it and just reschedule.
*/
if (queued) {
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
return;
}
/*
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
}
static __always_inline
/* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running)
- resched_curr(rq);
+ resched_curr_lazy(rq);
}
#ifdef CONFIG_SMP
if (delta < 0) {
if (task_current(rq, p))
- resched_curr(rq);
+ resched_curr_lazy(rq);
return;
}
hrtick_start(rq, delta);
* prevents us from potentially nominating it as a false LAST_BUDDY
* below.
*/
- if (test_tsk_need_resched(curr))
+ if (need_resched())
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
return;
preempt:
- resched_curr(rq);
+ resched_curr_lazy(rq);
}
#ifdef CONFIG_SMP
*/
if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
__entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
- resched_curr(rq);
+ resched_curr_lazy(rq);
}
/*
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
- resched_curr(rq);
+ resched_curr_lazy(rq);
} else
check_preempt_curr(rq, p, 0);
}
SCHED_FEAT(LATENCY_WARN, false)
SCHED_FEAT(HZ_BW, true)
+
+SCHED_FEAT(FORCE_NEED_RESCHED, false)
ct_cpuidle_enter();
raw_local_irq_enable();
- while (!tif_need_resched() &&
- (cpu_idle_force_poll || tick_check_broadcast_expired()))
+ while (!need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired()))
cpu_relax();
raw_local_irq_disable();
rd->rto_cpu = cpu;
- if (cpu < nr_cpu_ids)
+ if (cpu < nr_cpu_ids) {
+ if (!has_pushable_tasks(cpu_rq(cpu)))
+ continue;
return cpu;
+ }
rd->rto_cpu = -1;
extern void reweight_task(struct task_struct *p, int prio);
extern void resched_curr(struct rq *rq);
+extern void resched_curr_lazy(struct rq *rq);
extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
do_notify_parent_cldstop(current, false, why);
/*
- * Don't want to allow preemption here, because
- * sys_ptrace() needs this task to be inactive.
+ * The previous do_notify_parent_cldstop() invocation woke the ptracer.
+ * On a PREEMPTION kernel this can result in a preemption requirement
+ * which will be fulfilled after read_unlock() and the ptracer will be
+ * put on the CPU.
+ * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
+ * this task to wait in schedule(). If this task gets preempted then it
+ * remains enqueued on the runqueue. The ptracer will observe this and
+ * then sleep for a delay of one HZ tick. In the meantime this task
+ * gets scheduled, enters schedule() and will wait for the ptracer.
*
- * XXX: implement read_unlock_no_resched().
+ * This preemption point is not bad from a correctness point of view but
+ * it extends the runtime by one HZ tick due to the ptracer's sleep.
+ * The preempt-disable section ensures that there will be no preemption
+ * between unlock and schedule(), which improves performance since the
+ * ptracer has no reason to sleep.
+ *
+ * On PREEMPT_RT locking tasklist_lock does not disable preemption.
+ * Therefore the task can be preempted (after
+ * do_notify_parent_cldstop()) before unlocking tasklist_lock, so there
+ * is no benefit in doing this. The optimisation is harmful on
+ * PREEMPT_RT because the spinlock_t (in cgroup_enter_frozen()) must not
+ * be acquired with disabled preemption.
*/
- preempt_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
read_unlock(&tasklist_lock);
cgroup_enter_frozen();
- preempt_enable_no_resched();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable_no_resched();
schedule();
cgroup_leave_frozen(true);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
+void softirq_preempt(void)
+{
+ if (WARN_ON_ONCE(!preemptible()))
+ return;
+
+ if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
+ return;
+
+ __local_bh_enable(SOFTIRQ_OFFSET, true);
+ /* preemption point */
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+}
+
/*
* Invoked from ksoftirqd_run() outside of the interrupt disabled section
* to acquire the per CPU local lock for reentrancy protection.
#endif
}
+#ifdef CONFIG_PREEMPT_RT
+DEFINE_PER_CPU(struct task_struct *, timersd);
+DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
+
+static void wake_timersd(void)
+{
+ struct task_struct *tsk = __this_cpu_read(timersd);
+
+ if (tsk)
+ wake_up_process(tsk);
+}
+
+#else
+
+static inline void wake_timersd(void) { }
+
+#endif
+
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() &&
+ !(in_nmi() | in_hardirq()))
+ wake_timersd();
+
tick_irq_exit();
}
.thread_comm = "ksoftirqd/%u",
};
+#ifdef CONFIG_PREEMPT_RT
+static void timersd_setup(unsigned int cpu)
+{
+ sched_set_fifo_low(current);
+}
+
+static int timersd_should_run(unsigned int cpu)
+{
+ return local_pending_timers();
+}
+
+static void run_timersd(unsigned int cpu)
+{
+ unsigned int timer_si;
+
+ ksoftirqd_run_begin();
+
+ timer_si = local_pending_timers();
+ __this_cpu_write(pending_timer_softirq, 0);
+ or_softirq_pending(timer_si);
+
+ __do_softirq();
+
+ ksoftirqd_run_end();
+}
+
+static void raise_ktimers_thread(unsigned int nr)
+{
+ trace_softirq_raise(nr);
+ __this_cpu_or(pending_timer_softirq, 1 << nr);
+}
+
+void raise_hrtimer_softirq(void)
+{
+ raise_ktimers_thread(HRTIMER_SOFTIRQ);
+}
+
+void raise_timer_softirq(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ raise_ktimers_thread(TIMER_SOFTIRQ);
+ wake_timersd();
+ local_irq_restore(flags);
+}
+
+static struct smp_hotplug_thread timer_threads = {
+ .store = &timersd,
+ .setup = timersd_setup,
+ .thread_should_run = timersd_should_run,
+ .thread_fn = run_timersd,
+ .thread_comm = "ktimers/%u",
+};
+#endif
+
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-
+#ifdef CONFIG_PREEMPT_RT
+ BUG_ON(smpboot_register_percpu_thread(&timer_threads));
+#endif
return 0;
}
early_initcall(spawn_ksoftirqd);
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ raise_hrtimer_softirq();
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ raise_hrtimer_softirq();
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
static inline bool local_timer_softirq_pending(void)
{
- return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+ return local_pending_timers() & BIT(TIMER_SOFTIRQ);
}
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
*/
static void timer_sync_wait_running(struct timer_base *base)
{
- if (atomic_read(&base->timer_waiters)) {
+ bool need_preempt;
+
+ need_preempt = task_is_pi_boosted(current);
+ if (need_preempt || atomic_read(&base->timer_waiters)) {
raw_spin_unlock_irq(&base->lock);
spin_unlock(&base->expiry_lock);
+
+ if (need_preempt)
+ softirq_preempt();
+
spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
if (time_before(jiffies, base->next_expiry))
return;
}
- raise_softirq(TIMER_SOFTIRQ);
+ raise_timer_softirq();
}
/*
if (tif_need_resched())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
+ if (tif_need_resched_lazy())
+ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
bh_off ? 'b' :
- (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
+ !IS_ENABLED(CONFIG_TRACE_IRQFLAGS_SUPPORT) ? 'X' :
'.';
- switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
+ switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
TRACE_FLAG_PREEMPT_RESCHED)) {
+ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'B';
+ break;
case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
need_resched = 'N';
break;
+ case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'L';
+ break;
+ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
+ need_resched = 'b';
+ break;
case TRACE_FLAG_NEED_RESCHED:
need_resched = 'n';
break;
+ case TRACE_FLAG_NEED_RESCHED_LAZY:
+ need_resched = 'l';
+ break;
case TRACE_FLAG_PREEMPT_RESCHED:
need_resched = 'p';
break;
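For reference, the latency-format letters produced by the switch above: 'B' = immediate + lazy + preempt, 'N' = immediate + preempt, 'L' = lazy + preempt, 'b' = immediate + lazy, 'n' = immediate only, 'l' = lazy only, 'p' = preempt only, and '.' when none of the flags is set (the unmodified default case).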
#endif /* CONFIG_RPS */
-/* Called from hardirq (IPI) context */
-static void trigger_rx_softirq(void *data)
-{
- struct softnet_data *sd = data;
-
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
- smp_store_release(&sd->defer_ipi_scheduled, 0);
-}
-
/*
* After we queued a packet into sd->input_pkt_queue,
* we need to make sure this queue is serviced soon.
}
}
+#ifndef CONFIG_PREEMPT_RT
+
+/* Called from hardirq (IPI) context */
+static void trigger_rx_softirq(void *data)
+{
+ struct softnet_data *sd = data;
+
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ smp_store_release(&sd->defer_ipi_scheduled, 0);
+}
+
+#else
+
+static void trigger_rx_softirq(struct work_struct *defer_work)
+{
+ struct softnet_data *sd;
+
+ sd = container_of(defer_work, struct softnet_data, defer_work);
+ smp_store_release(&sd->defer_ipi_scheduled, 0);
+ local_bh_disable();
+ skb_defer_free_flush(sd);
+ local_bh_enable();
+}
+
+#endif
+
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
sd->cpu = i;
#endif
+#ifndef CONFIG_PREEMPT_RT
INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
+#else
+ INIT_WORK(&sd->defer_work, trigger_rx_softirq);
+#endif
spin_lock_init(&sd->defer_lock);
init_gro_hash(&sd->backlog);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
+ if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+#ifndef CONFIG_PREEMPT_RT
smp_call_function_single_async(cpu, &sd->defer_csd);
+#else
+ schedule_work_on(cpu, &sd->defer_work);
+#endif
+ }
}
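Editor's note on the RT variant above: on PREEMPT_RT the deferred skb free is flushed from a workqueue item (schedule_work_on()) instead of a remote IPI, since the RT trigger_rx_softirq() runs in thread context and can simply call skb_defer_free_flush() under local_bh_disable() rather than raising NET_RX_SOFTIRQ from hardirq context.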
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,