[X86] Avoid generating invalid R_X86_64_GOTPCRELX relocations
author Harald van Dijk <harald@gigawatt.nl>
Fri, 18 Dec 2020 23:38:38 +0000 (23:38 +0000)
committer Harald van Dijk <harald@gigawatt.nl>
Fri, 18 Dec 2020 23:38:38 +0000 (23:38 +0000)
We need to make sure not to emit R_X86_64_GOTPCRELX relocations for
instructions that use a REX prefix. If a REX prefix is present, we need to
instead use a R_X86_64_REX_GOTPCRELX relocation. The existing logic for
CALL64m, JMP64m, etc. already handles this by checking the HasREX parameter
and using it to determine which relocation type to use. Do this for all
instructions that can use relaxed relocations.

Reviewed By: MaskRay

Differential Revision: https://reviews.llvm.org/D93561

lld/test/ELF/x86-64-gotpc-relax-nopic.s
llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
llvm/test/MC/ELF/got-relaxed-rex.s [deleted file]
llvm/test/MC/X86/gotpcrelx.s

index 501414f..81d25f9 100644 (file)
@@ -23,8 +23,8 @@
 # DISASM-NEXT:                 orl   {{.*}}(%rip), %edi  # 202240
 # DISASM-NEXT:                 sbbl  {{.*}}(%rip), %esi  # 202240
 # DISASM-NEXT:                 subl  {{.*}}(%rip), %ebp  # 202240
-# DISASM-NEXT:                 xorl  {{.*}}(%rip), %r8d  # 202240
-# DISASM-NEXT:                 testl %r15d, {{.*}}(%rip) # 202240
+# DISASM-NEXT:                 xorl  $0x203248, %r8d
+# DISASM-NEXT:                 testl $0x203248, %r15d
 # DISASM-NEXT:   201200:       adcq  $0x203248, %rax
 # DISASM-NEXT:                 addq  $0x203248, %rbx
 # DISASM-NEXT:                 andq  $0x203248, %rcx
index 59860ca..260253a 100644 (file)
@@ -409,6 +409,12 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
       switch (Opcode) {
       default:
         return X86::reloc_riprel_4byte;
+      case X86::MOV64rm:
+        // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
+        // special case because COFF and Mach-O don't support ELF's more
+        // flexible R_X86_64_REX_GOTPCRELX relaxation.
+        assert(HasREX);
+        return X86::reloc_riprel_4byte_movq_load;
       case X86::ADC32rm:
       case X86::ADD32rm:
       case X86::AND32rm:
@@ -419,13 +425,6 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
       case X86::SUB32rm:
       case X86::TEST32mr:
       case X86::XOR32rm:
-        return X86::reloc_riprel_4byte_relax;
-      case X86::MOV64rm:
-        // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
-        // special case because COFF and Mach-O don't support ELF's more
-        // flexible R_X86_64_REX_GOTPCRELX relaxation.
-        assert(HasREX);
-        return X86::reloc_riprel_4byte_movq_load;
       case X86::CALL64m:
       case X86::JMP64m:
       case X86::TAILJMPm64:
diff --git a/llvm/test/MC/ELF/got-relaxed-rex.s b/llvm/test/MC/ELF/got-relaxed-rex.s
deleted file mode 100644 (file)
index 1924bdd..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux %s -o - | llvm-readobj -r - | FileCheck %s
-
-// these should produce R_X86_64_REX_GOTPCRELX
-
-        movq mov@GOTPCREL(%rip), %rax
-        test %rax, test@GOTPCREL(%rip)
-        adc adc@GOTPCREL(%rip), %rax
-        add add@GOTPCREL(%rip), %rax
-        and and@GOTPCREL(%rip), %rax
-        cmp cmp@GOTPCREL(%rip), %rax
-        or  or@GOTPCREL(%rip), %rax
-        sbb sbb@GOTPCREL(%rip), %rax
-        sub sub@GOTPCREL(%rip), %rax
-        xor xor@GOTPCREL(%rip), %rax
-
-.section .norelax,"ax"
-## This expression loads the GOT entry with an offset.
-## Don't emit R_X86_64_REX_GOTPCRELX.
-        movq mov@GOTPCREL+1(%rip), %rax
-
-// CHECK:      Relocations [
-// CHECK-NEXT:   Section ({{.*}}) .rela.text {
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX mov
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX test
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX adc
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX add
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX and
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX cmp
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX or
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX sbb
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX sub
-// CHECK-NEXT:     R_X86_64_REX_GOTPCRELX xor
-// CHECK-NEXT:   }
-// CHECK-NEXT:   Section ({{.*}}) .rela.norelax {
-// CHECK-NEXT:     R_X86_64_GOTPCREL mov
-// CHECK-NEXT:   }
index 3889835..91f20c6 100644 (file)
 # CHECK-NEXT:     R_X86_64_GOTPCRELX xor
 # CHECK-NEXT:     R_X86_64_GOTPCRELX call
 # CHECK-NEXT:     R_X86_64_GOTPCRELX jmp
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX mov
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX test
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX adc
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX add
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX and
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX cmp
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX or
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX sbb
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX sub
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX xor
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX mov
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX test
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX adc
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX add
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX and
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX cmp
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX or
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX sbb
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX sub
+# CHECK-NEXT:     R_X86_64_REX_GOTPCRELX xor
 # CHECK-NEXT:   }
 
 # NORELAX-NEXT:     R_X86_64_GOTPCREL mov
 # NORELAX-NEXT:     R_X86_64_GOTPCREL xor
 # NORELAX-NEXT:     R_X86_64_GOTPCREL call
 # NORELAX-NEXT:     R_X86_64_GOTPCREL jmp
+# NORELAX-NEXT:     R_X86_64_GOTPCREL mov
+# NORELAX-NEXT:     R_X86_64_GOTPCREL test
+# NORELAX-NEXT:     R_X86_64_GOTPCREL adc
+# NORELAX-NEXT:     R_X86_64_GOTPCREL add
+# NORELAX-NEXT:     R_X86_64_GOTPCREL and
+# NORELAX-NEXT:     R_X86_64_GOTPCREL cmp
+# NORELAX-NEXT:     R_X86_64_GOTPCREL or
+# NORELAX-NEXT:     R_X86_64_GOTPCREL sbb
+# NORELAX-NEXT:     R_X86_64_GOTPCREL sub
+# NORELAX-NEXT:     R_X86_64_GOTPCREL xor
+# NORELAX-NEXT:     R_X86_64_GOTPCREL mov
+# NORELAX-NEXT:     R_X86_64_GOTPCREL test
+# NORELAX-NEXT:     R_X86_64_GOTPCREL adc
+# NORELAX-NEXT:     R_X86_64_GOTPCREL add
+# NORELAX-NEXT:     R_X86_64_GOTPCREL and
+# NORELAX-NEXT:     R_X86_64_GOTPCREL cmp
+# NORELAX-NEXT:     R_X86_64_GOTPCREL or
+# NORELAX-NEXT:     R_X86_64_GOTPCREL sbb
+# NORELAX-NEXT:     R_X86_64_GOTPCREL sub
+# NORELAX-NEXT:     R_X86_64_GOTPCREL xor
 # NORELAX-NEXT:   }
 
 movl mov@GOTPCREL(%rip), %eax
@@ -46,8 +86,31 @@ xor xor@GOTPCREL(%rip), %eax
 call *call@GOTPCREL(%rip)
 jmp *jmp@GOTPCREL(%rip)
 
+movl mov@GOTPCREL(%rip), %r8d
+test %r8d, test@GOTPCREL(%rip)
+adc adc@GOTPCREL(%rip), %r8d
+add add@GOTPCREL(%rip), %r8d
+and and@GOTPCREL(%rip), %r8d
+cmp cmp@GOTPCREL(%rip), %r8d
+or  or@GOTPCREL(%rip), %r8d
+sbb sbb@GOTPCREL(%rip), %r8d
+sub sub@GOTPCREL(%rip), %r8d
+xor xor@GOTPCREL(%rip), %r8d
+
+movq mov@GOTPCREL(%rip), %rax
+test %rax, test@GOTPCREL(%rip)
+adc adc@GOTPCREL(%rip), %rax
+add add@GOTPCREL(%rip), %rax
+and and@GOTPCREL(%rip), %rax
+cmp cmp@GOTPCREL(%rip), %rax
+or  or@GOTPCREL(%rip), %rax
+sbb sbb@GOTPCREL(%rip), %rax
+sub sub@GOTPCREL(%rip), %rax
+xor xor@GOTPCREL(%rip), %rax
+
 # COMMON-NEXT:   Section ({{.*}}) .rela.norelax {
 # COMMON-NEXT:     R_X86_64_GOTPCREL mov 0x0
+# COMMON-NEXT:     R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFD
 # COMMON-NEXT:     R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFC
 # COMMON-NEXT:   }
 # COMMON-NEXT: ]
@@ -56,5 +119,7 @@ jmp *jmp@GOTPCREL(%rip)
 ## Clang may emit this expression to load the high 32-bit of the GOT entry.
 ## Don't emit R_X86_64_GOTPCRELX.
 movl mov@GOTPCREL+4(%rip), %eax
+## Don't emit R_X86_64_GOTPCRELX.
+movq mov@GOTPCREL+1(%rip), %rax
 ## We could emit R_X86_64_GOTPCRELX, but it is probably unnecessary.
 movl mov@GOTPCREL+0(%rip), %eax