Move Absolute Lowcore Area allocation to the decompressor.
As a result, the get_abs_lowcore() and put_abs_lowcore() access
brackets become straightforward and no longer require complex
execution-context analysis or special handling of low-address
protection (LAP) and interrupts.
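
A typical access bracket then reduces to a plain get/put pair
around the update, as at the call sites converted below (a sketch;
"some_field" and "value" are placeholders, not actual lowcore
members):

	struct lowcore *abs_lc;

	abs_lc = get_abs_lowcore();	/* get_cpu() + per-CPU slot */
	abs_lc->some_field = value;
	put_abs_lowcore(abs_lc);	/* put_cpu() */
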
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
extern struct vmlinux_info _vmlinux_info;
#define vmlinux _vmlinux_info
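+/*
+ * The decompressor maps a single lowcore-sized slot at __abs_lowcore;
+ * the modulo folds an address in that range to its offset within the
+ * slot, which is also its physical address while the boot lowcore
+ * still resides at absolute address 0.
+ */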
+#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
+
#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/maccess.h>
+#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
enum populate_mode {
POPULATE_NONE,
POPULATE_ONE2ONE,
+ POPULATE_ABS_LOWCORE,
};
static void boot_check_oom(void)
return -1;
case POPULATE_ONE2ONE:
return addr;
+ case POPULATE_ABS_LOWCORE:
+ return __abs_lowcore_pa(addr);
default:
return -1;
}
pgtable_populate_begin(online_end);
pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
pgtable_populate(0, online_end, POPULATE_ONE2ONE);
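+ /*
+  * Map one lowcore-sized slot at __abs_lowcore; it resolves to the
+  * boot lowcore at physical address 0 (see __abs_lowcore_pa()). The
+  * per-CPU slots are remapped later via abs_lowcore_map().
+  */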
+ pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
+ POPULATE_ABS_LOWCORE);
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
POPULATE_NONE);
memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
extern unsigned long __abs_lowcore;
-extern bool abs_lowcore_mapped;
-struct lowcore *get_abs_lowcore(unsigned long *flags);
-void put_abs_lowcore(struct lowcore *lc, unsigned long flags);
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc);
void abs_lowcore_unmap(int cpu);
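+/*
+ * Access bracket for the absolute lowcore: get_cpu() disables
+ * preemption so that the returned per-CPU slot stays stable;
+ * put_abs_lowcore() ignores its argument and just reenables
+ * preemption via put_cpu().
+ */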
+static inline struct lowcore *get_abs_lowcore(void)
+{
+ int cpu;
+
+ cpu = get_cpu();
+ return ((struct lowcore *)__abs_lowcore) + cpu;
+}
+
+static inline void put_abs_lowcore(struct lowcore *lc)
+{
+ put_cpu();
+}
+
#endif /* _ASM_S390_ABS_LOWCORE_H */
#include <linux/pgtable.h>
#include <asm/abs_lowcore.h>
-#define ABS_LOWCORE_UNMAPPED 1
-#define ABS_LOWCORE_LAP_ON 2
-#define ABS_LOWCORE_IRQS_ON 4
-
unsigned long __bootdata_preserved(__abs_lowcore);
-bool __ro_after_init abs_lowcore_mapped;
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{
addr += PAGE_SIZE;
}
}
-
-struct lowcore *get_abs_lowcore(unsigned long *flags)
-{
- unsigned long irq_flags;
- union ctlreg0 cr0;
- int cpu;
-
- *flags = 0;
- cpu = get_cpu();
- if (abs_lowcore_mapped) {
- return ((struct lowcore *)__abs_lowcore) + cpu;
- } else {
- if (cpu != 0)
- panic("Invalid unmapped absolute lowcore access\n");
- local_irq_save(irq_flags);
- if (!irqs_disabled_flags(irq_flags))
- *flags |= ABS_LOWCORE_IRQS_ON;
- __ctl_store(cr0.val, 0, 0);
- if (cr0.lap) {
- *flags |= ABS_LOWCORE_LAP_ON;
- __ctl_clear_bit(0, 28);
- }
- *flags |= ABS_LOWCORE_UNMAPPED;
- return lowcore_ptr[0];
- }
-}
-
-void put_abs_lowcore(struct lowcore *lc, unsigned long flags)
-{
- if (abs_lowcore_mapped) {
- if (flags)
- panic("Invalid mapped absolute lowcore release\n");
- } else {
- if (smp_processor_id() != 0)
- panic("Invalid mapped absolute lowcore access\n");
- if (!(flags & ABS_LOWCORE_UNMAPPED))
- panic("Invalid unmapped absolute lowcore release\n");
- if (flags & ABS_LOWCORE_LAP_ON)
- __ctl_set_bit(0, 28);
- if (flags & ABS_LOWCORE_IRQS_ON)
- local_irq_enable();
- }
- put_cpu();
-}
{
unsigned long ipib = (unsigned long) reipl_block_actual;
struct lowcore *abs_lc;
- unsigned long flags;
unsigned int csum;
csum = (__force unsigned int)
csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
abs_lc->ipib = ipib;
abs_lc->ipib_checksum = csum;
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
dump_run(trigger);
}
void arch_crash_save_vmcoreinfo(void)
{
struct lowcore *abs_lc;
- unsigned long flags;
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
abs_lc->vmcore_info = paddr_vmcoreinfo_note();
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
}
void machine_shutdown(void)
void __init os_info_init(void)
{
struct lowcore *abs_lc;
- unsigned long flags;
os_info.version_major = OS_INFO_VERSION_MAJOR;
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
abs_lc->os_info = __pa(&os_info);
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
}
#ifdef CONFIG_CRASH_DUMP
{
struct lowcore *lc, *abs_lc;
unsigned long mcck_stack;
- unsigned long flags;
/*
* Setup lowcore for boot cpu
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->user_asce = S390_lowcore.user_asce;
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
abs_lc->restart_stack = lc->restart_stack;
abs_lc->restart_fn = lc->restart_fn;
abs_lc->restart_data = lc->restart_data;
memcpy(abs_lc->cregs_save_area, lc->cregs_save_area, sizeof(abs_lc->cregs_save_area));
abs_lc->program_new_psw = lc->program_new_psw;
abs_lc->mcesad = lc->mcesad;
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
set_prefix(__pa(lc));
lowcore_ptr[0] = lc;
- if (abs_lowcore_map(0, lowcore_ptr[0], true))
+ if (abs_lowcore_map(0, lowcore_ptr[0], false))
panic("Couldn't setup absolute lowcore");
- abs_lowcore_mapped = true;
}
static struct resource code_resource = {
{
struct lowcore *lc, *abs_lc;
unsigned int source_cpu;
- unsigned long flags;
lc = lowcore_ptr[pcpu - pcpu_devices];
source_cpu = stap();
lc->restart_data = (unsigned long)data;
lc->restart_source = source_cpu;
} else {
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
abs_lc->restart_stack = stack;
abs_lc->restart_fn = (unsigned long)func;
abs_lc->restart_data = (unsigned long)data;
abs_lc->restart_source = source_cpu;
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
}
__bpon();
asm volatile(
{
struct ec_creg_mask_parms parms = { .cr = cr, };
struct lowcore *abs_lc;
- unsigned long flags;
u64 ctlreg;
if (set) {
parms.andval = ~(1UL << bit);
}
spin_lock(&ctl_lock);
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
ctlreg = abs_lc->cregs_save_area[cr];
ctlreg = (ctlreg & parms.andval) | parms.orval;
abs_lc->cregs_save_area[cr] = ctlreg;
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
spin_unlock(&ctl_lock);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
void *ptr = phys_to_virt(addr);
void *bounce = ptr;
struct lowcore *abs_lc;
- unsigned long flags;
unsigned long size;
int this_cpu, cpu;
goto out;
size = PAGE_SIZE - (addr & ~PAGE_MASK);
if (addr < sizeof(struct lowcore)) {
- abs_lc = get_abs_lowcore(&flags);
+ abs_lc = get_abs_lowcore();
ptr = (void *)abs_lc + addr;
memcpy(bounce, ptr, size);
- put_abs_lowcore(abs_lc, flags);
+ put_abs_lowcore(abs_lc);
} else if (cpu == this_cpu) {
ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
memcpy(bounce, ptr, size);