// These are the same as the regular MOVZX32rr8/rm8 and MOVSX32rr8/rm8 except
// that they use GR32_NOREX for the output operand register class (and
// GR8_NOREX/i8mem_NOREX for the source) instead of GR32. This allows them to
// operate on h registers on x86-64.
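+// Being pseudos, they carry no encoding of their own; they are expanded to
+// the regular MOVZX32rr8/rm8 and MOVSX32rr8/rm8 forms in
+// X86InstrInfo::expandPostRAPseudo once register allocation has constrained
+// the operands to the non-REX register classes.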
-let hasSideEffects = 0, isCodeGenOnly = 1 in {
-def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
+let hasSideEffects = 0, isPseudo = 1 in {
+def MOVZX32_NOREXrr8 : I<0, Pseudo,
(outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
- "movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
- [], IIC_MOVZX>, TB, OpSize32, Sched<[WriteALU]>;
+ "", [], IIC_MOVZX>, Sched<[WriteALU]>;
let mayLoad = 1 in
-def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
+def MOVZX32_NOREXrm8 : I<0, Pseudo,
(outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
- "movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
- [], IIC_MOVZX>, TB, OpSize32, Sched<[WriteALULd]>;
+ "", [], IIC_MOVZX>, Sched<[WriteALULd]>;
-def MOVSX32_NOREXrr8 : I<0xBE, MRMSrcReg,
+def MOVSX32_NOREXrr8 : I<0, Pseudo,
(outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
- "movs{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
- [], IIC_MOVSX>, TB, OpSize32, Sched<[WriteALU]>;
+ "", [], IIC_MOVSX>, Sched<[WriteALU]>;
let mayLoad = 1 in
-def MOVSX32_NOREXrm8 : I<0xBE, MRMSrcMem,
+def MOVSX32_NOREXrm8 : I<0, Pseudo,
(outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
- "movs{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
- [], IIC_MOVSX>, TB, OpSize32, Sched<[WriteALULd]>;
+ "", [], IIC_MOVSX>, Sched<[WriteALULd]>;
}
// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
case X86::VMOVUPSZ256mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
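+ // The *_NOREX pseudos have the same operand layout as the corresponding
+ // real instructions, so swapping in the real descriptor is sufficient here.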
+ case X86::MOV8rr_NOREX:
+ MI.setDesc(get(X86::MOV8rr));
+ return true;
+ case X86::MOV8rm_NOREX:
+ MI.setDesc(get(X86::MOV8rm));
+ return true;
+ case X86::MOV8mr_NOREX:
+ MI.setDesc(get(X86::MOV8mr));
+ return true;
+ case X86::MOVZX32_NOREXrr8:
+ MI.setDesc(get(X86::MOVZX32rr8));
+ return true;
+ case X86::MOVZX32_NOREXrm8:
+ MI.setDesc(get(X86::MOVZX32rm8));
+ return true;
+ case X86::MOVSX32_NOREXrr8:
+ MI.setDesc(get(X86::MOVSX32rr8));
+ return true;
+ case X86::MOVSX32_NOREXrm8:
+ MI.setDesc(get(X86::MOVSX32rm8));
+ return true;
case X86::TEST8ri_NOREX:
MI.setDesc(get(X86::TEST8ri));
return true;
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying, loading, and storing h registers, which
// can't be encoded when a REX prefix is present.
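+// As pseudos, these are expanded to the real MOV8rr/MOV8mr/MOV8rm in
+// X86InstrInfo::expandPostRAPseudo.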
-let isCodeGenOnly = 1 in {
+let isPseudo = 1 in {
let hasSideEffects = 0 in
-def MOV8rr_NOREX : I<0x88, MRMDestReg,
+def MOV8rr_NOREX : I<0, Pseudo,
(outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
- "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>,
- Sched<[WriteMove]>;
+ "", [], IIC_MOV>, Sched<[WriteMove]>;
let mayStore = 1, hasSideEffects = 0 in
-def MOV8mr_NOREX : I<0x88, MRMDestMem,
+def MOV8mr_NOREX : I<0, Pseudo,
(outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
- "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
- IIC_MOV_MEM>, Sched<[WriteStore]>;
+ "", [], IIC_MOV_MEM>, Sched<[WriteStore]>;
let mayLoad = 1, hasSideEffects = 0,
canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
+def MOV8rm_NOREX : I<0, Pseudo,
(outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
- "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
- IIC_MOV_MEM>, Sched<[WriteLoad]>;
+ "", [], IIC_MOV_MEM>, Sched<[WriteLoad]>;
}
// Note, we are currently not handling the following instructions:
// MOV64ao8, MOV64o8a
// XCHG16ar, XCHG32ar, XCHG64ar
- case X86::MOV8mr_NOREX:
case X86::MOV8mr:
- case X86::MOV8rm_NOREX:
case X86::MOV8rm:
case X86::MOV16mr:
case X86::MOV16rm:
unsigned NewOpc;
switch (OutMI.getOpcode()) {
default: llvm_unreachable("Invalid opcode");
- case X86::MOV8mr_NOREX:
case X86::MOV8mr: NewOpc = X86::MOV8o32a; break;
- case X86::MOV8rm_NOREX:
case X86::MOV8rm: NewOpc = X86::MOV8ao32; break;
case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
; CHECK-LABEL: bextr32_subreg:
; CHECK: # BB#0:
; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: movzbl %ah, %eax # NOREX
+; CHECK-NEXT: movzbl %ah, %eax
; CHECK-NEXT: retq
%1 = lshr i32 %x, 8
%2 = and i32 %1, 255
; CHECK-LABEL: bextr64_subreg:
; CHECK: # BB#0:
; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: movzbl %ah, %eax # NOREX
+; CHECK-NEXT: movzbl %ah, %eax
; CHECK-NEXT: retq
%1 = lshr i64 %x, 8
%2 = and i64 %1, 255
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; CHECK-NEXT: divb %cl
-; CHECK-NEXT: movzbl %ah, %eax # NOREX
+; CHECK-NEXT: movzbl %ah, %eax
; CHECK-NEXT: retl
%result = srem i32 %a, %b
ret i32 %result
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; CHECK-NEXT: divb %cl
-; CHECK-NEXT: movzbl %ah, %edx # NOREX
+; CHECK-NEXT: movzbl %ah, %edx
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retl
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %ebx # NOREX
+; X32-NEXT: movsbl %ah, %ebx
; X32-NEXT: movb %al, (%edx)
; X32-NEXT: movb %bl, (%ecx)
; X32-NEXT: popl %ebx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %esi # NOREX
+; X64-NEXT: movsbl %ah, %esi
; X64-NEXT: movb %al, (%rdx)
; X64-NEXT: movb %sil, (%rcx)
; X64-NEXT: retq
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %ebx # NOREX
+; X32-NEXT: movzbl %ah, %ebx
; X32-NEXT: movb %al, (%edx)
; X32-NEXT: movb %bl, (%ecx)
; X32-NEXT: popl %ebx
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %esi # NOREX
+; X64-NEXT: movzbl %ah, %esi
; X64-NEXT: movb %al, (%rdx)
; X64-NEXT: movb %sil, (%rcx)
; X64-NEXT: retq
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %ecx # NOREX
+; X32-NEXT: movzbl %ah, %ecx
; X32-NEXT: movb %al, z
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: retl
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %ecx # NOREX
+; X64-NEXT: movzbl %ah, %ecx
; X64-NEXT: movb %al, {{.*}}(%rip)
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: retq
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %eax # NOREX
+; X32-NEXT: movzbl %ah, %eax
; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X32-NEXT: retl
;
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax # NOREX
+; X64-NEXT: movzbl %ah, %eax
; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT: retq
%1 = urem i8 %x, %y
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X32-NEXT: divb %cl
-; X32-NEXT: movzbl %ah, %eax # NOREX
+; X32-NEXT: movzbl %ah, %eax
; X32-NEXT: addb %cl, %al
; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X32-NEXT: retl
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax # NOREX
+; X64-NEXT: movzbl %ah, %eax
; X64-NEXT: addb %sil, %al
; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT: retq
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %eax # NOREX
+; X32-NEXT: movzbl %ah, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
;
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax # NOREX
+; X64-NEXT: movzbl %ah, %eax
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: retq
%1 = urem i8 %x, %y
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %ecx # NOREX
+; X32-NEXT: movsbl %ah, %ecx
; X32-NEXT: movb %al, z
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: retl
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %ecx # NOREX
+; X64-NEXT: movsbl %ah, %ecx
; X64-NEXT: movb %al, {{.*}}(%rip)
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: retq
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %eax # NOREX
+; X32-NEXT: movsbl %ah, %eax
; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X32-NEXT: retl
;
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %eax # NOREX
+; X64-NEXT: movsbl %ah, %eax
; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT: retq
%1 = srem i8 %x, %y
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: cbtw
; X32-NEXT: idivb %cl
-; X32-NEXT: movsbl %ah, %eax # NOREX
+; X32-NEXT: movsbl %ah, %eax
; X32-NEXT: addb %cl, %al
; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X32-NEXT: retl
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %eax # NOREX
+; X64-NEXT: movsbl %ah, %eax
; X64-NEXT: addb %sil, %al
; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT: retq
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %eax # NOREX
+; X32-NEXT: movsbl %ah, %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: sarl $31, %edx
; X32-NEXT: retl
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %eax # NOREX
+; X64-NEXT: movsbl %ah, %eax
; X64-NEXT: movsbq %al, %rax
; X64-NEXT: retq
%1 = srem i8 %x, %y
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %ecx # NOREX
+; X32-NEXT: movzbl %ah, %ecx
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: xorl %edx, %edx
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %ecx # NOREX
+; X64-NEXT: movzbl %ah, %ecx
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: addq %rcx, %rax
; SSE2-X64-LABEL: extract_i8_15:
; SSE2-X64: # BB#0:
; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-X64-NEXT: movb %ah, (%rdi) # NOREX
+; SSE2-X64-NEXT: movb %ah, (%rdi)
; SSE2-X64-NEXT: retq
;
; SSE41-X32-LABEL: extract_i8_15:
; SSE-F128-LABEL: extract_i8_15:
; SSE-F128: # BB#0:
; SSE-F128-NEXT: pextrw $7, %xmm0, %eax
-; SSE-F128-NEXT: movb %ah, (%rdi) # NOREX
+; SSE-F128-NEXT: movb %ah, (%rdi)
; SSE-F128-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 15
store i8 %vecext, i8* %dst, align 1
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shll $8, %ecx
; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: movzbl %ch, %eax # NOREX
+; X64-NEXT: movzbl %ch, %eax
; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; X64-NEXT: retq
;
; CHECK-LABEL: test_x86_tbm_bextri_u32_subreg:
; CHECK: # BB#0:
; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: movzbl %ah, %eax # NOREX
+; CHECK-NEXT: movzbl %ah, %eax
; CHECK-NEXT: retq
%t0 = lshr i32 %a, 8
%t1 = and i32 %t0, 255
; CHECK-LABEL: test_x86_tbm_bextri_u64_subreg:
; CHECK: # BB#0:
; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: movzbl %ah, %eax # NOREX
+; CHECK-NEXT: movzbl %ah, %eax
; CHECK-NEXT: retq
%t0 = lshr i64 %a, 8
%t1 = and i64 %t0, 255
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X86-NEXT: divb %cl
-; X86-NEXT: movzbl %ah, %eax # NOREX
+; X86-NEXT: movzbl %ah, %eax
; X86-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X86-NEXT: retl
;
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax # NOREX
+; X64-NEXT: movzbl %ah, %eax
; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT: retq
%and = and i8 %y, 4