s390/smp: do not preserve boot CPU lowcore on hotplug
author: Alexander Gordeev <agordeev@linux.ibm.com>
Thu, 6 May 2021 14:26:53 +0000 (16:26 +0200)
committer: Vasily Gorbik <gor@linux.ibm.com>
Mon, 7 Jun 2021 15:07:00 +0000 (17:07 +0200)
Once the kernel is running the boot CPU lowcore becomes
freeable and does not differ from the secondary CPU ones
in any way. Make use of it and do not preserve the boot
CPU lowcore on unplugging. That allows returning unused
memory when the boot CPU is offline and makes the code
clearer.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/kernel/smp.c

index 1e1d61a..eaca08b 100644 (file)
@@ -194,20 +194,12 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        unsigned long async_stack, nodat_stack, mcck_stack;
        struct lowcore *lc;
 
-       if (pcpu != &pcpu_devices[0]) {
-               pcpu->lowcore = (struct lowcore *)
-                       __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-               nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-               if (!pcpu->lowcore || !nodat_stack)
-                       goto out;
-       } else {
-               nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
-       }
+       lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+       nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
        async_stack = stack_alloc();
        mcck_stack = stack_alloc();
-       if (!async_stack || !mcck_stack)
-               goto out_stack;
-       lc = pcpu->lowcore;
+       if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+               goto out;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = async_stack + STACK_INIT_OFFSET;
@@ -220,40 +212,37 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
        lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
        if (nmi_alloc_per_cpu(lc))
-               goto out_stack;
+               goto out;
        lowcore_ptr[cpu] = lc;
+       pcpu->lowcore = lc;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
        return 0;
 
-out_stack:
+out:
        stack_free(mcck_stack);
        stack_free(async_stack);
-out:
-       if (pcpu != &pcpu_devices[0]) {
-               free_pages(nodat_stack, THREAD_SIZE_ORDER);
-               free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-       }
+       free_pages(nodat_stack, THREAD_SIZE_ORDER);
+       free_pages((unsigned long) lc, LC_ORDER);
        return -ENOMEM;
 }
 
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
-       unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
-
-       nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
-       async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
-       mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
-       lowcore = (unsigned long) pcpu->lowcore;
+       unsigned long async_stack, nodat_stack, mcck_stack;
+       struct lowcore *lc;
 
+       lc = pcpu->lowcore;
+       nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
+       async_stack = lc->async_stack - STACK_INIT_OFFSET;
+       mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[pcpu - pcpu_devices] = NULL;
-       nmi_free_per_cpu(pcpu->lowcore);
+       pcpu->lowcore = NULL;
+       nmi_free_per_cpu(lc);
        stack_free(async_stack);
        stack_free(mcck_stack);
-       if (pcpu == &pcpu_devices[0])
-               return;
        free_pages(nodat_stack, THREAD_SIZE_ORDER);
-       free_pages(lowcore, LC_ORDER);
+       free_pages((unsigned long) lc, LC_ORDER);
 }
 
 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)