From 51098f0eb22e2f54055d75dd25bc84eff07d6d8a Mon Sep 17 00:00:00 2001
From: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
Date: Wed, 16 Nov 2022 15:47:11 +0100
Subject: [PATCH] s390/cmpxchg: make loop condition for 1,2 byte cases precise

The cmpxchg implementation for 1 and 2 bytes is built on a 4 byte
cmpxchg loop. Currently, the decision to retry is imprecise: the loop
retries whenever bits outside the target byte(s) change, instead of
retrying only until the target byte(s) differ from the old value.
E.g. if an attempt to exchange (prev_left_0 old_bytes prev_right_0) is
made and it fails because the word at the address is
(prev_left_1 x prev_right_1), where both x != old_bytes and one of the
prev_*_1 values differs from the respective prev_*_0 value, the cmpxchg
is retried, even though, by the semantics of a normal cmpxchg, the
exchange would simply fail. Instead, exit the loop if x != old_bytes,
and retry otherwise.

Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
Link: https://lore.kernel.org/r/20221116144711.3811011-1-scgl@linux.ibm.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
---
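For illustration, the precise retry condition can be modeled in plain C.
The sketch below emulates a 1 byte cmpxchg on top of a 4 byte
compare-and-swap; the GCC __atomic builtins stand in for the CS
instruction, and emulate_cmpxchg_byte() is a made-up name for this
note, not kernel API:

#include <stdint.h>

static uint8_t emulate_cmpxchg_byte(uint32_t *word, unsigned int shift,
				    uint8_t old, uint8_t new)
{
	uint32_t mask = ~(0xffU << shift);	/* zeroes the target byte */
	uint32_t old_w = (uint32_t)old << shift;
	uint32_t new_w = (uint32_t)new << shift;
	uint32_t prev, expected, desired;

	prev = __atomic_load_n(word, __ATOMIC_RELAXED);
	do {
		/* keep the surrounding bytes exactly as last observed */
		expected = (prev & mask) | old_w;
		desired = (prev & mask) | new_w;
		if (__atomic_compare_exchange_n(word, &expected, desired, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;	/* exchanged; target byte was old */
		prev = expected;	/* failed CAS yields the current word */
		/*
		 * Precise condition: retry only while the target byte still
		 * equals old. If it differs, the exchange has genuinely
		 * failed, no matter what the surrounding bytes did.
		 */
	} while (((prev ^ old_w) & ~mask) == 0);
	return prev >> shift;	/* report the byte actually found */
}

With shift = (3 ^ (address & 3)) << 3, as computed in the patch, this
selects the correct big-endian byte within the containing aligned word.
A failed CAS now loops only while the failure can be attributed to the
bytes outside the mask.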
 arch/s390/include/asm/cmpxchg.h | 60 +++++++++++++++++--------------
 arch/s390/include/asm/uaccess.h | 80 ++++++++++++++++++++++-------------------
 2 files changed, 78 insertions(+), 62 deletions(-)

diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 1c5785b..3f26416 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -90,55 +90,63 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
 {
 	switch (size) {
 	case 1: {
-		unsigned int prev, tmp, shift;
+		unsigned int prev, shift, mask;
 
 		shift = (3 ^ (address & 3)) << 3;
 		address ^= address & 3;
+		old = (old & 0xff) << shift;
+		new = (new & 0xff) << shift;
+		mask = ~(0xff << shift);
 		asm volatile(
 			"	l	%[prev],%[address]\n"
-			"0:	nr	%[prev],%[mask]\n"
-			"	lr	%[tmp],%[prev]\n"
-			"	or	%[prev],%[old]\n"
-			"	or	%[tmp],%[new]\n"
-			"	cs	%[prev],%[tmp],%[address]\n"
+			"	nr	%[prev],%[mask]\n"
+			"	xilf	%[mask],0xffffffff\n"
+			"	or	%[new],%[prev]\n"
+			"	or	%[prev],%[tmp]\n"
+			"0:	lr	%[tmp],%[prev]\n"
+			"	cs	%[prev],%[new],%[address]\n"
 			"	jnl	1f\n"
 			"	xr	%[tmp],%[prev]\n"
+			"	xr	%[new],%[tmp]\n"
 			"	nr	%[tmp],%[mask]\n"
-			"	jnz	0b\n"
+			"	jz	0b\n"
 			"1:"
 			: [prev] "=&d" (prev),
-			  [tmp] "=&d" (tmp),
-			  [address] "+Q" (*(int *)address)
-			: [old] "d" ((old & 0xff) << shift),
-			  [new] "d" ((new & 0xff) << shift),
-			  [mask] "d" (~(0xff << shift))
-			: "memory", "cc");
+			  [address] "+Q" (*(int *)address),
+			  [tmp] "+&d" (old),
+			  [new] "+&d" (new),
+			  [mask] "+&d" (mask)
+			:: "memory", "cc");
 		return prev >> shift;
 	}
 	case 2: {
-		unsigned int prev, tmp, shift;
+		unsigned int prev, shift, mask;
 
 		shift = (2 ^ (address & 2)) << 3;
 		address ^= address & 2;
+		old = (old & 0xffff) << shift;
+		new = (new & 0xffff) << shift;
+		mask = ~(0xffff << shift);
 		asm volatile(
 			"	l	%[prev],%[address]\n"
-			"0:	nr	%[prev],%[mask]\n"
-			"	lr	%[tmp],%[prev]\n"
-			"	or	%[prev],%[old]\n"
-			"	or	%[tmp],%[new]\n"
-			"	cs	%[prev],%[tmp],%[address]\n"
+			"	nr	%[prev],%[mask]\n"
+			"	xilf	%[mask],0xffffffff\n"
+			"	or	%[new],%[prev]\n"
+			"	or	%[prev],%[tmp]\n"
+			"0:	lr	%[tmp],%[prev]\n"
+			"	cs	%[prev],%[new],%[address]\n"
 			"	jnl	1f\n"
 			"	xr	%[tmp],%[prev]\n"
+			"	xr	%[new],%[tmp]\n"
 			"	nr	%[tmp],%[mask]\n"
-			"	jnz	0b\n"
+			"	jz	0b\n"
 			"1:"
 			: [prev] "=&d" (prev),
-			  [tmp] "=&d" (tmp),
-			  [address] "+Q" (*(int *)address)
-			: [old] "d" ((old & 0xffff) << shift),
-			  [new] "d" ((new & 0xffff) << shift),
-			  [mask] "d" (~(0xffff << shift))
-			: "memory", "cc");
+			  [address] "+Q" (*(int *)address),
+			  [tmp] "+&d" (old),
+			  [new] "+&d" (new),
+			  [mask] "+&d" (mask)
+			:: "memory", "cc");
 		return prev >> shift;
 	}
 	case 4: {
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a125e60..d028ee5 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -400,74 +400,82 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
 
 	switch (size) {
 	case 1: {
-		unsigned int prev, tmp, shift;
+		unsigned int prev, shift, mask, _old, _new;
 
 		shift = (3 ^ (address & 3)) << 3;
 		address ^= address & 3;
+		_old = (old & 0xff) << shift;
+		_new = (new & 0xff) << shift;
+		mask = ~(0xff << shift);
 		asm volatile(
 			"	spka	0(%[key])\n"
 			"	sacf	256\n"
 			"0:	l	%[prev],%[address]\n"
 			"1:	nr	%[prev],%[mask]\n"
-			"	lr	%[tmp],%[prev]\n"
-			"	or	%[prev],%[old]\n"
-			"	or	%[tmp],%[new]\n"
-			"2:	cs	%[prev],%[tmp],%[address]\n"
-			"3:	jnl	4f\n"
+			"	xilf	%[mask],0xffffffff\n"
+			"	or	%[new],%[prev]\n"
+			"	or	%[prev],%[tmp]\n"
+			"2:	lr	%[tmp],%[prev]\n"
+			"3:	cs	%[prev],%[new],%[address]\n"
+			"4:	jnl	5f\n"
 			"	xr	%[tmp],%[prev]\n"
+			"	xr	%[new],%[tmp]\n"
 			"	nr	%[tmp],%[mask]\n"
-			"	jnz	1b\n"
-			"4:	sacf	768\n"
+			"	jz	2b\n"
+			"5:	sacf	768\n"
 			"	spka	%[default_key]\n"
-			EX_TABLE_UA_LOAD_REG(0b, 4b, %[rc], %[prev])
-			EX_TABLE_UA_LOAD_REG(1b, 4b, %[rc], %[prev])
-			EX_TABLE_UA_LOAD_REG(2b, 4b, %[rc], %[prev])
-			EX_TABLE_UA_LOAD_REG(3b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
 			: [rc] "+&d" (rc),
 			  [prev] "=&d" (prev),
-			  [tmp] "=&d" (tmp),
-			  [address] "+Q" (*(int *)address)
-			: [old] "d" (((unsigned int)old & 0xff) << shift),
-			  [new] "d" (((unsigned int)new & 0xff) << shift),
-			  [mask] "d" (~(0xff << shift)),
-			  [key] "a" (key << 4),
+			  [address] "+Q" (*(int *)address),
+			  [tmp] "+&d" (_old),
+			  [new] "+&d" (_new),
+			  [mask] "+&d" (mask)
+			: [key] "a" (key << 4),
 			  [default_key] "J" (PAGE_DEFAULT_KEY)
 			: "memory", "cc");
 		*(unsigned char *)uval = prev >> shift;
 		return rc;
 	}
 	case 2: {
-		unsigned int prev, tmp, shift;
+		unsigned int prev, shift, mask, _old, _new;
 
 		shift = (2 ^ (address & 2)) << 3;
 		address ^= address & 2;
+		_old = (old & 0xffff) << shift;
+		_new = (new & 0xffff) << shift;
+		mask = ~(0xffff << shift);
 		asm volatile(
 			"	spka	0(%[key])\n"
 			"	sacf	256\n"
 			"0:	l	%[prev],%[address]\n"
 			"1:	nr	%[prev],%[mask]\n"
-			"	lr	%[tmp],%[prev]\n"
-			"	or	%[prev],%[old]\n"
-			"	or	%[tmp],%[new]\n"
-			"2:	cs	%[prev],%[tmp],%[address]\n"
-			"3:	jnl	4f\n"
+			"	xilf	%[mask],0xffffffff\n"
+			"	or	%[new],%[prev]\n"
+			"	or	%[prev],%[tmp]\n"
+			"2:	lr	%[tmp],%[prev]\n"
+			"3:	cs	%[prev],%[new],%[address]\n"
+			"4:	jnl	5f\n"
 			"	xr	%[tmp],%[prev]\n"
+			"	xr	%[new],%[tmp]\n"
 			"	nr	%[tmp],%[mask]\n"
-			"	jnz	1b\n"
-			"4:	sacf	768\n"
+			"	jz	2b\n"
+			"5:	sacf	768\n"
 			"	spka	%[default_key]\n"
-			EX_TABLE_UA_LOAD_REG(0b, 4b, %[rc], %[prev])
-			EX_TABLE_UA_LOAD_REG(1b, 4b, %[rc], %[prev])
-			EX_TABLE_UA_LOAD_REG(2b, 4b, %[rc], %[prev])
-			EX_TABLE_UA_LOAD_REG(3b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
 			: [rc] "+&d" (rc),
 			  [prev] "=&d" (prev),
-			  [tmp] "=&d" (tmp),
-			  [address] "+Q" (*(int *)address)
-			: [old] "d" (((unsigned int)old & 0xffff) << shift),
-			  [new] "d" (((unsigned int)new & 0xffff) << shift),
-			  [mask] "d" (~(0xffff << shift)),
-			  [key] "a" (key << 4),
+			  [address] "+Q" (*(int *)address),
+			  [tmp] "+&d" (_old),
+			  [new] "+&d" (_new),
+			  [mask] "+&d" (mask)
+			: [key] "a" (key << 4),
 			  [default_key] "J" (PAGE_DEFAULT_KEY)
 			: "memory", "cc");
 		*(unsigned short *)uval = prev >> shift;
-- 
2.7.4