--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK0,X86-FALLBACK0
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK1,X86-FALLBACK1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK2,X86-FALLBACK2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI2,X86-BMI2,FALLBACK3,X86-FALLBACK3
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI2,X86-BMI2,FALLBACK4,X86-FALLBACK4
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK0,X64-FALLBACK0
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK1,X64-FALLBACK1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK2,X64-FALLBACK2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI2,X64-BMI2,FALLBACK3,X64-FALLBACK3
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI2,X64-BMI2,FALLBACK4,X64-FALLBACK4
+
+; Patterns:
+; c) x & (-1 >> y)
+; d) x << y >> y
+; are equivalent, but we prefer the second variant if we have BMI2.
+
+; We do not test the variant where y = (32 - z), because that is BMI2's BZHI.
+
+; ---------------------------------------------------------------------------- ;
+; 8-bit
+; ---------------------------------------------------------------------------- ;
+
+; i8, pattern c): %mask = lshr i8 -1, %numhighbits; result = mask & val.
+; Only the generic X86/X64 prefixes appear, i.e. BMI2 does not change i8 codegen here.
+define i8 @clear_highbits8_c0(i8 %val, i8 %numhighbits) nounwind {
+; X86-LABEL: clear_highbits8_c0:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shrb %cl, %al
+; X86-NEXT: andb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_highbits8_c0:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrb %cl, %al
+; X64-NEXT: andb %dil, %al
+; X64-NEXT: retq
+ %mask = lshr i8 -1, %numhighbits
+ %masked = and i8 %mask, %val
+ ret i8 %masked
+}
+
+; i8 load variant: %val comes from memory; the load is expected to fold into the 'and'.
+define i8 @clear_highbits8_c2_load(i8* %w, i8 %numhighbits) nounwind {
+; X86-LABEL: clear_highbits8_c2_load:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shrb %cl, %al
+; X86-NEXT: andb (%edx), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_highbits8_c2_load:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrb %cl, %al
+; X64-NEXT: andb (%rdi), %al
+; X64-NEXT: retq
+ %val = load i8, i8* %w
+ %mask = lshr i8 -1, %numhighbits
+ %masked = and i8 %mask, %val
+ ret i8 %masked
+}
+
+; i8 commuted 'and' operands (val & mask); must match the same pattern as c0.
+define i8 @clear_highbits8_c4_commutative(i8 %val, i8 %numhighbits) nounwind {
+; X86-LABEL: clear_highbits8_c4_commutative:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shrb %cl, %al
+; X86-NEXT: andb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_highbits8_c4_commutative:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrb %cl, %al
+; X64-NEXT: andb %dil, %al
+; X64-NEXT: retq
+ %mask = lshr i8 -1, %numhighbits
+ %masked = and i8 %val, %mask ; swapped order
+ ret i8 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; 16-bit
+; ---------------------------------------------------------------------------- ;
+
+; i16, pattern c): shift is promoted to 32-bit (0xFFFF >> y), then 'and'.
+; NOBMI2 uses shrl with %cl; BMI2 uses shrxl, avoiding the fixed-%cl shift count.
+define i16 @clear_highbits16_c0(i16 %val, i16 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits16_c0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits16_c0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits16_c0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits16_c0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %mask = lshr i16 -1, %numhighbits
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+; i16 with the shift amount arriving as i8 and zext'd; codegen should match c0.
+define i16 @clear_highbits16_c1_indexzext(i16 %val, i8 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits16_c1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits16_c1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits16_c1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits16_c1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %sh_prom = zext i8 %numhighbits to i16
+ %mask = lshr i16 -1, %sh_prom
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+; i16 load variant: the 16-bit load folds into 'andw' on both targets.
+define i16 @clear_highbits16_c2_load(i16* %w, i16 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits16_c2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andw (%edx), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits16_c2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $65535, %edx # imm = 0xFFFF
+; X86-BMI2-NEXT: shrxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andw (%ecx), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits16_c2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andw (%rdi), %ax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits16_c2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andw (%rdi), %ax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %val = load i16, i16* %w
+ %mask = lshr i16 -1, %numhighbits
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+; i16 load variant with an i8->i16 zext'd shift amount; same checks as c2_load.
+define i16 @clear_highbits16_c3_load_indexzext(i16* %w, i8 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits16_c3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andw (%edx), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits16_c3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $65535, %edx # imm = 0xFFFF
+; X86-BMI2-NEXT: shrxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andw (%ecx), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits16_c3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andw (%rdi), %ax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits16_c3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andw (%rdi), %ax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %val = load i16, i16* %w
+ %sh_prom = zext i8 %numhighbits to i16
+ %mask = lshr i16 -1, %sh_prom
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+; i16 commuted 'and' operands (val & mask); must match the same pattern as c0.
+define i16 @clear_highbits16_c4_commutative(i16 %val, i16 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits16_c4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits16_c4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits16_c4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits16_c4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $65535, %eax # imm = 0xFFFF
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %mask = lshr i16 -1, %numhighbits
+ %masked = and i16 %val, %mask ; swapped order
+ ret i16 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; 32-bit
+; ---------------------------------------------------------------------------- ;
+
+; i32, pattern c): (-1 >> y) & x at native width; shrl/%cl vs. shrxl split on BMI2.
+define i32 @clear_highbits32_c0(i32 %val, i32 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits32_c0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits32_c0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits32_c0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits32_c0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+; i32 with an i8->i32 zext'd shift amount; codegen should match c0.
+define i32 @clear_highbits32_c1_indexzext(i32 %val, i8 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits32_c1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits32_c1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits32_c1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits32_c1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+; i32 load variant: the load folds into 'andl' on both targets.
+define i32 @clear_highbits32_c2_load(i32* %w, i32 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits32_c2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andl (%edx), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits32_c2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shrxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andl (%ecx), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits32_c2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl (%rdi), %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits32_c2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl (%rdi), %eax
+; X64-BMI2-NEXT: retq
+ %val = load i32, i32* %w
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+; i32 load variant with an i8->i32 zext'd shift amount; same checks as c2_load.
+define i32 @clear_highbits32_c3_load_indexzext(i32* %w, i8 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits32_c3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andl (%edx), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits32_c3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shrxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andl (%ecx), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits32_c3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl (%rdi), %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits32_c3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl (%rdi), %eax
+; X64-BMI2-NEXT: retq
+ %val = load i32, i32* %w
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+; i32 commuted 'and' operands (val & mask); must match the same pattern as c0.
+define i32 @clear_highbits32_c4_commutative(i32 %val, i32 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits32_c4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shrl %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits32_c4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits32_c4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits32_c4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; 64-bit
+; ---------------------------------------------------------------------------- ;
+
+; i64, pattern c). On 32-bit X86 the 64-bit lshr is lowered to a two-register
+; shrd/shr pair plus a 'testb $32' branch for shift amounts >= 32; on X64 it is a
+; single 64-bit shr (shrx with BMI2).
+define i64 @clear_highbits64_c0(i64 %val, i64 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits64_c0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: shrl %cl, %edx
+; X86-NOBMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB13_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edx, %eax
+; X86-NOBMI2-NEXT: xorl %edx, %edx
+; X86-NOBMI2-NEXT: .LBB13_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits64_c0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %eax
+; X86-BMI2-NEXT: shrxl %ecx, %eax, %edx
+; X86-BMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB13_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edx, %eax
+; X86-BMI2-NEXT: xorl %edx, %edx
+; X86-BMI2-NEXT: .LBB13_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits64_c0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits64_c0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+; i64 with an i8->i64 zext'd shift amount; X64-BMI2 needs the implicit
+; sub-register widening noted by the '# kill' comment before shrxq.
+define i64 @clear_highbits64_c1_indexzext(i64 %val, i8 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits64_c1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: shrl %cl, %edx
+; X86-NOBMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB14_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edx, %eax
+; X86-NOBMI2-NEXT: xorl %edx, %edx
+; X86-NOBMI2-NEXT: .LBB14_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits64_c1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %eax
+; X86-BMI2-NEXT: shrxl %ecx, %eax, %edx
+; X86-BMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB14_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edx, %eax
+; X86-BMI2-NEXT: xorl %edx, %edx
+; X86-BMI2-NEXT: .LBB14_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits64_c1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits64_c1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+; i64 load variant: on X86 the two 32-bit halves are masked separately
+; ((%esi) and 4(%esi)); on X64 the 64-bit load folds into 'andq'.
+define i64 @clear_highbits64_c2_load(i64* %w, i64 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits64_c2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: shrl %cl, %edx
+; X86-NOBMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB15_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edx, %eax
+; X86-NOBMI2-NEXT: xorl %edx, %edx
+; X86-NOBMI2-NEXT: .LBB15_2:
+; X86-NOBMI2-NEXT: andl (%esi), %eax
+; X86-NOBMI2-NEXT: andl 4(%esi), %edx
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits64_c2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %eax
+; X86-BMI2-NEXT: shrxl %ecx, %eax, %edx
+; X86-BMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB15_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edx, %eax
+; X86-BMI2-NEXT: xorl %edx, %edx
+; X86-BMI2-NEXT: .LBB15_2:
+; X86-BMI2-NEXT: andl (%esi), %eax
+; X86-BMI2-NEXT: andl 4(%esi), %edx
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits64_c2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrq %cl, %rax
+; X64-NOBMI2-NEXT: andq (%rdi), %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits64_c2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq (%rdi), %rax
+; X64-BMI2-NEXT: retq
+ %val = load i64, i64* %w
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+; i64 load variant with an i8->i64 zext'd shift amount; combines the split-half
+; X86 lowering of c2_load with the sub-register widening of c1_indexzext.
+define i64 @clear_highbits64_c3_load_indexzext(i64* %w, i8 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits64_c3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: shrl %cl, %edx
+; X86-NOBMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB16_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edx, %eax
+; X86-NOBMI2-NEXT: xorl %edx, %edx
+; X86-NOBMI2-NEXT: .LBB16_2:
+; X86-NOBMI2-NEXT: andl (%esi), %eax
+; X86-NOBMI2-NEXT: andl 4(%esi), %edx
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits64_c3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %eax
+; X86-BMI2-NEXT: shrxl %ecx, %eax, %edx
+; X86-BMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB16_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edx, %eax
+; X86-BMI2-NEXT: xorl %edx, %edx
+; X86-BMI2-NEXT: .LBB16_2:
+; X86-BMI2-NEXT: andl (%esi), %eax
+; X86-BMI2-NEXT: andl 4(%esi), %edx
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits64_c3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrq %cl, %rax
+; X64-NOBMI2-NEXT: andq (%rdi), %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits64_c3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq (%rdi), %rax
+; X64-BMI2-NEXT: retq
+ %val = load i64, i64* %w
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+; i64 commuted 'and' operands (val & mask); must match the same pattern as c0.
+define i64 @clear_highbits64_c4_commutative(i64 %val, i64 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: clear_highbits64_c4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: shrl %cl, %edx
+; X86-NOBMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB17_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edx, %eax
+; X86-NOBMI2-NEXT: xorl %edx, %edx
+; X86-NOBMI2-NEXT: .LBB17_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_highbits64_c4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %eax
+; X86-BMI2-NEXT: shrxl %ecx, %eax, %edx
+; X86-BMI2-NEXT: shrdl %cl, %eax, %eax
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB17_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edx, %eax
+; X86-BMI2-NEXT: xorl %edx, %edx
+; X86-BMI2-NEXT: .LBB17_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_highbits64_c4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_highbits64_c4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Multi-use tests
+; ---------------------------------------------------------------------------- ;
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+; Multi-use: %mask is also passed to @use32, so the mask must be materialized
+; in a call-preserved register and survive the call before the 'and'.
+define i32 @oneuse32(i32 %val, i32 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: oneuse32:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: subl $8, %esp
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %esi
+; X86-NOBMI2-NEXT: shrl %cl, %esi
+; X86-NOBMI2-NEXT: movl %esi, (%esp)
+; X86-NOBMI2-NEXT: calll use32
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movl %esi, %eax
+; X86-NOBMI2-NEXT: addl $8, %esp
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: oneuse32:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: subl $8, %esp
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shrxl %eax, %ecx, %esi
+; X86-BMI2-NEXT: movl %esi, (%esp)
+; X86-BMI2-NEXT: calll use32
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movl %esi, %eax
+; X86-BMI2-NEXT: addl $8, %esp
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: oneuse32:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: pushq %rbp
+; X64-NOBMI2-NEXT: pushq %rbx
+; X64-NOBMI2-NEXT: pushq %rax
+; X64-NOBMI2-NEXT: movl %edi, %ebx
+; X64-NOBMI2-NEXT: movl $-1, %ebp
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrl %cl, %ebp
+; X64-NOBMI2-NEXT: movl %ebp, %edi
+; X64-NOBMI2-NEXT: callq use32
+; X64-NOBMI2-NEXT: andl %ebx, %ebp
+; X64-NOBMI2-NEXT: movl %ebp, %eax
+; X64-NOBMI2-NEXT: addq $8, %rsp
+; X64-NOBMI2-NEXT: popq %rbx
+; X64-NOBMI2-NEXT: popq %rbp
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: oneuse32:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %rbp
+; X64-BMI2-NEXT: pushq %rbx
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movl %edi, %ebx
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shrxl %esi, %eax, %ebp
+; X64-BMI2-NEXT: movl %ebp, %edi
+; X64-BMI2-NEXT: callq use32
+; X64-BMI2-NEXT: andl %ebx, %ebp
+; X64-BMI2-NEXT: movl %ebp, %eax
+; X64-BMI2-NEXT: addq $8, %rsp
+; X64-BMI2-NEXT: popq %rbx
+; X64-BMI2-NEXT: popq %rbp
+; X64-BMI2-NEXT: retq
+ %mask = lshr i32 -1, %numhighbits
+ call void @use32(i32 %mask)
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+; Multi-use i64: the mask (two registers on X86) is kept live across the
+; @use64 call and reused by the final 'and'.
+define i64 @oneuse64(i64 %val, i64 %numhighbits) nounwind {
+; X86-NOBMI2-LABEL: oneuse64:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %edi
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: pushl %eax
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %esi
+; X86-NOBMI2-NEXT: movl $-1, %edi
+; X86-NOBMI2-NEXT: shrl %cl, %edi
+; X86-NOBMI2-NEXT: shrdl %cl, %esi, %esi
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB19_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edi, %esi
+; X86-NOBMI2-NEXT: xorl %edi, %edi
+; X86-NOBMI2-NEXT: .LBB19_2:
+; X86-NOBMI2-NEXT: subl $8, %esp
+; X86-NOBMI2-NEXT: pushl %edi
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: calll use64
+; X86-NOBMI2-NEXT: addl $16, %esp
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI2-NEXT: movl %esi, %eax
+; X86-NOBMI2-NEXT: movl %edi, %edx
+; X86-NOBMI2-NEXT: addl $4, %esp
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: popl %edi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: oneuse64:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %edi
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: pushl %eax
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %esi
+; X86-BMI2-NEXT: shrxl %ecx, %esi, %edi
+; X86-BMI2-NEXT: shrdl %cl, %esi, %esi
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB19_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edi, %esi
+; X86-BMI2-NEXT: xorl %edi, %edi
+; X86-BMI2-NEXT: .LBB19_2:
+; X86-BMI2-NEXT: subl $8, %esp
+; X86-BMI2-NEXT: pushl %edi
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: calll use64
+; X86-BMI2-NEXT: addl $16, %esp
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-BMI2-NEXT: movl %esi, %eax
+; X86-BMI2-NEXT: movl %edi, %edx
+; X86-BMI2-NEXT: addl $4, %esp
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: popl %edi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: oneuse64:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: pushq %r14
+; X64-NOBMI2-NEXT: pushq %rbx
+; X64-NOBMI2-NEXT: pushq %rax
+; X64-NOBMI2-NEXT: movq %rdi, %r14
+; X64-NOBMI2-NEXT: movq $-1, %rbx
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shrq %cl, %rbx
+; X64-NOBMI2-NEXT: movq %rbx, %rdi
+; X64-NOBMI2-NEXT: callq use64
+; X64-NOBMI2-NEXT: andq %r14, %rbx
+; X64-NOBMI2-NEXT: movq %rbx, %rax
+; X64-NOBMI2-NEXT: addq $8, %rsp
+; X64-NOBMI2-NEXT: popq %rbx
+; X64-NOBMI2-NEXT: popq %r14
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: oneuse64:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %r14
+; X64-BMI2-NEXT: pushq %rbx
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movq %rdi, %r14
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shrxq %rsi, %rax, %rbx
+; X64-BMI2-NEXT: movq %rbx, %rdi
+; X64-BMI2-NEXT: callq use64
+; X64-BMI2-NEXT: andq %r14, %rbx
+; X64-BMI2-NEXT: movq %rbx, %rax
+; X64-BMI2-NEXT: addq $8, %rsp
+; X64-BMI2-NEXT: popq %rbx
+; X64-BMI2-NEXT: popq %r14
+; X64-BMI2-NEXT: retq
+ %mask = lshr i64 -1, %numhighbits
+ call void @use64(i64 %mask)
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK0,X86-FALLBACK0
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK1,X86-FALLBACK1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK2,X86-FALLBACK2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI2,X86-BMI2,FALLBACK3,X86-FALLBACK3
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI2,X86-BMI2,FALLBACK4,X86-FALLBACK4
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK0,X64-FALLBACK0
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK1,X64-FALLBACK1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK2,X64-FALLBACK2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI2,X64-BMI2,FALLBACK3,X64-FALLBACK3
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI2,X64-BMI2,FALLBACK4,X64-FALLBACK4
+
+; Patterns:
+; c) x & (-1 << y)
+; ic) x & (-1 << (32 - y))
+; d) x >> y << y
+; id) x >> (32 - y) << (32 - y)
+; are equivalent, but we prefer the shift-based variants (d and id) if we have BMI2.
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c.
+; ---------------------------------------------------------------------------- ;
+
+; 8-bit
+
+define i8 @clear_lowbits8_c0(i8 %val, i8 %numlowbits) nounwind {
+; X86-LABEL: clear_lowbits8_c0:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shlb %cl, %al
+; X86-NEXT: andb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_lowbits8_c0:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shlb %cl, %al
+; X64-NEXT: andb %dil, %al
+; X64-NEXT: retq
+ %mask = shl i8 -1, %numlowbits
+ %masked = and i8 %mask, %val
+ ret i8 %masked
+}
+
+define i8 @clear_lowbits8_c2_load(i8* %w, i8 %numlowbits) nounwind {
+; X86-LABEL: clear_lowbits8_c2_load:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shlb %cl, %al
+; X86-NEXT: andb (%edx), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_lowbits8_c2_load:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shlb %cl, %al
+; X64-NEXT: andb (%rdi), %al
+; X64-NEXT: retq
+ %val = load i8, i8* %w
+ %mask = shl i8 -1, %numlowbits
+ %masked = and i8 %mask, %val
+ ret i8 %masked
+}
+
+define i8 @clear_lowbits8_c4_commutative(i8 %val, i8 %numlowbits) nounwind {
+; X86-LABEL: clear_lowbits8_c4_commutative:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shlb %cl, %al
+; X86-NEXT: andb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_lowbits8_c4_commutative:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shlb %cl, %al
+; X64-NEXT: andb %dil, %al
+; X64-NEXT: retq
+ %mask = shl i8 -1, %numlowbits
+ %masked = and i8 %val, %mask ; swapped order
+ ret i8 %masked
+}
+
+; 16-bit
+
+define i16 @clear_lowbits16_c0(i16 %val, i16 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_c0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_c0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_c0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_c0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %mask = shl i16 -1, %numlowbits
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_c1_indexzext(i16 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_c1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_c1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_c1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_c1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %sh_prom = zext i8 %numlowbits to i16
+ %mask = shl i16 -1, %sh_prom
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_c2_load(i16* %w, i16 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_c2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw (%edx), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_c2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andw (%ecx), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_c2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andw (%rdi), %ax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_c2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andw (%rdi), %ax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %val = load i16, i16* %w
+ %mask = shl i16 -1, %numlowbits
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_c3_load_indexzext(i16* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_c3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw (%edx), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_c3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andw (%ecx), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_c3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andw (%rdi), %ax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_c3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andw (%rdi), %ax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %val = load i16, i16* %w
+ %sh_prom = zext i8 %numlowbits to i16
+ %mask = shl i16 -1, %sh_prom
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_c4_commutative(i16 %val, i16 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_c4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_c4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_c4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_c4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %mask = shl i16 -1, %numlowbits
+ %masked = and i16 %val, %mask ; swapped order
+ ret i16 %masked
+}
+
+; 32-bit
+
+define i32 @clear_lowbits32_c0(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_c0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_c0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_c0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_c0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %mask = shl i32 -1, %numlowbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_c1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_c1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_c1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_c1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %sh_prom = zext i8 %numlowbits to i32
+ %mask = shl i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_c2_load(i32* %w, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_c2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl (%edx), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_c2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andl (%ecx), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_c2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl (%rdi), %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_c2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl (%rdi), %eax
+; X64-BMI2-NEXT: retq
+ %val = load i32, i32* %w
+ %mask = shl i32 -1, %numlowbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_c3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl (%edx), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_c3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andl (%ecx), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_c3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl (%rdi), %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_c3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl (%rdi), %eax
+; X64-BMI2-NEXT: retq
+ %val = load i32, i32* %w
+ %sh_prom = zext i8 %numlowbits to i32
+ %mask = shl i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_c4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_c4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_c4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_c4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %mask = shl i32 -1, %numlowbits
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @clear_lowbits64_c0(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_c0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB13_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB13_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_c0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB13_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB13_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_c0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_c0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %mask = shl i64 -1, %numlowbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_c1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB14_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB14_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_c1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB14_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB14_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_c1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_c1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %sh_prom = zext i8 %numlowbits to i64
+ %mask = shl i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_c2_load(i64* %w, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_c2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB15_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB15_2:
+; X86-NOBMI2-NEXT: andl 4(%esi), %edx
+; X86-NOBMI2-NEXT: andl (%esi), %eax
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_c2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB15_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB15_2:
+; X86-BMI2-NEXT: andl 4(%esi), %edx
+; X86-BMI2-NEXT: andl (%esi), %eax
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_c2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq (%rdi), %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_c2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq (%rdi), %rax
+; X64-BMI2-NEXT: retq
+ %val = load i64, i64* %w
+ %mask = shl i64 -1, %numlowbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_c3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB16_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB16_2:
+; X86-NOBMI2-NEXT: andl 4(%esi), %edx
+; X86-NOBMI2-NEXT: andl (%esi), %eax
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_c3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB16_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB16_2:
+; X86-BMI2-NEXT: andl 4(%esi), %edx
+; X86-BMI2-NEXT: andl (%esi), %eax
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_c3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq (%rdi), %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_c3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq (%rdi), %rax
+; X64-BMI2-NEXT: retq
+ %val = load i64, i64* %w
+ %sh_prom = zext i8 %numlowbits to i64
+ %mask = shl i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_c4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB17_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB17_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_c4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB17_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB17_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_c4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_c4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %mask = shl i64 -1, %numlowbits
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern ic.
+; ---------------------------------------------------------------------------- ;
+
+; 8-bit
+
+define i8 @clear_lowbits8_ic0(i8 %val, i8 %numlowbits) nounwind {
+; X86-LABEL: clear_lowbits8_ic0:
+; X86: # %bb.0:
+; X86-NEXT: movb $8, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shlb %cl, %al
+; X86-NEXT: andb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_lowbits8_ic0:
+; X64: # %bb.0:
+; X64-NEXT: movb $8, %cl
+; X64-NEXT: subb %sil, %cl
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: shlb %cl, %al
+; X64-NEXT: andb %dil, %al
+; X64-NEXT: retq
+ %numhighbits = sub i8 8, %numlowbits
+ %mask = shl i8 -1, %numhighbits
+ %masked = and i8 %mask, %val
+ ret i8 %masked
+}
+
+define i8 @clear_lowbits8_ic2_load(i8* %w, i8 %numlowbits) nounwind {
+; X86-LABEL: clear_lowbits8_ic2_load:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb $8, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shlb %cl, %al
+; X86-NEXT: andb (%edx), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_lowbits8_ic2_load:
+; X64: # %bb.0:
+; X64-NEXT: movb $8, %cl
+; X64-NEXT: subb %sil, %cl
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: shlb %cl, %al
+; X64-NEXT: andb (%rdi), %al
+; X64-NEXT: retq
+ %val = load i8, i8* %w
+ %numhighbits = sub i8 8, %numlowbits
+ %mask = shl i8 -1, %numhighbits
+ %masked = and i8 %mask, %val
+ ret i8 %masked
+}
+
+define i8 @clear_lowbits8_ic4_commutative(i8 %val, i8 %numlowbits) nounwind {
+; X86-LABEL: clear_lowbits8_ic4_commutative:
+; X86: # %bb.0:
+; X86-NEXT: movb $8, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movb $-1, %al
+; X86-NEXT: shlb %cl, %al
+; X86-NEXT: andb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: clear_lowbits8_ic4_commutative:
+; X64: # %bb.0:
+; X64-NEXT: movb $8, %cl
+; X64-NEXT: subb %sil, %cl
+; X64-NEXT: movb $-1, %al
+; X64-NEXT: shlb %cl, %al
+; X64-NEXT: andb %dil, %al
+; X64-NEXT: retq
+ %numhighbits = sub i8 8, %numlowbits
+ %mask = shl i8 -1, %numhighbits
+ %masked = and i8 %val, %mask ; swapped order
+ ret i8 %masked
+}
+
+; 16-bit
+
+define i16 @clear_lowbits16_ic0(i16 %val, i16 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_ic0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movw $16, %cx
+; X86-NOBMI2-NEXT: subw {{[0-9]+}}(%esp), %cx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: # kill: def $cl killed $cl killed $cx
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_ic0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movw $16, %ax
+; X86-BMI2-NEXT: subw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_ic0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $16, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_ic0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $16, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i16 16, %numlowbits
+ %mask = shl i16 -1, %numhighbits
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_ic1_indexzext(i16 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_ic1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb $16, %cl
+; X86-NOBMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_ic1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb $16, %al
+; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_ic1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movb $16, %cl
+; X64-NOBMI2-NEXT: subb %sil, %cl
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_ic1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movb $16, %al
+; X64-BMI2-NEXT: subb %sil, %al
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i8 16, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i16
+ %mask = shl i16 -1, %sh_prom
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_ic2_load(i16* %w, i16 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_ic2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movw $16, %cx
+; X86-NOBMI2-NEXT: subw {{[0-9]+}}(%esp), %cx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: # kill: def $cl killed $cl killed $cx
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw (%edx), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_ic2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movw $16, %ax
+; X86-BMI2-NEXT: subw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andw (%ecx), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_ic2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $16, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andw (%rdi), %ax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_ic2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $16, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andw (%rdi), %ax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %val = load i16, i16* %w
+ %numhighbits = sub i16 16, %numlowbits
+ %mask = shl i16 -1, %numhighbits
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_ic3_load_indexzext(i16* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_ic3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb $16, %cl
+; X86-NOBMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw (%edx), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_ic3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb $16, %al
+; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andw (%ecx), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_ic3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movb $16, %cl
+; X64-NOBMI2-NEXT: subb %sil, %cl
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andw (%rdi), %ax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_ic3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movb $16, %al
+; X64-BMI2-NEXT: subb %sil, %al
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andw (%rdi), %ax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %val = load i16, i16* %w
+ %numhighbits = sub i8 16, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i16
+ %mask = shl i16 -1, %sh_prom
+ %masked = and i16 %mask, %val
+ ret i16 %masked
+}
+
+define i16 @clear_lowbits16_ic4_commutative(i16 %val, i16 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits16_ic4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movw $16, %cx
+; X86-NOBMI2-NEXT: subw {{[0-9]+}}(%esp), %cx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: # kill: def $cl killed $cl killed $cx
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits16_ic4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movw $16, %ax
+; X86-BMI2-NEXT: subw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andw {{[0-9]+}}(%esp), %ax
+; X86-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits16_ic4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $16, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits16_ic4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $16, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i16 16, %numlowbits
+ %mask = shl i16 -1, %numhighbits
+ %masked = and i16 %val, %mask ; swapped order
+ ret i16 %masked
+}
+
+; 32-bit
+
+define i32 @clear_lowbits32_ic0(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_ic0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl $32, %ecx
+; X86-NOBMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_ic0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl $32, %eax
+; X86-BMI2-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_ic0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $32, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_ic0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = shl i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_ic1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_ic1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb $32, %cl
+; X86-NOBMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_ic1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb $32, %al
+; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_ic1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movb $32, %cl
+; X64-NOBMI2-NEXT: subb %sil, %cl
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_ic1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movb $32, %al
+; X64-BMI2-NEXT: subb %sil, %al
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = shl i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_ic2_load(i32* %w, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_ic2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movl $32, %ecx
+; X86-NOBMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl (%edx), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_ic2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movl $32, %eax
+; X86-BMI2-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andl (%ecx), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_ic2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $32, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl (%rdi), %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_ic2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl (%rdi), %eax
+; X64-BMI2-NEXT: retq
+ %val = load i32, i32* %w
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = shl i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_ic3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_ic3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: movb $32, %cl
+; X86-NOBMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl (%edx), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_ic3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movb $32, %al
+; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %eax, %edx, %eax
+; X86-BMI2-NEXT: andl (%ecx), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_ic3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movb $32, %cl
+; X64-NOBMI2-NEXT: subb %sil, %cl
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl (%rdi), %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_ic3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movb $32, %al
+; X64-BMI2-NEXT: subb %sil, %al
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl (%rdi), %eax
+; X64-BMI2-NEXT: retq
+ %val = load i32, i32* %w
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = shl i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @clear_lowbits32_ic4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits32_ic4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl $32, %ecx
+; X86-NOBMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits32_ic4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl $32, %eax
+; X86-BMI2-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits32_ic4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $32, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movl $-1, %eax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shll %cl, %eax
+; X64-NOBMI2-NEXT: andl %edi, %eax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits32_ic4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movl $-1, %ecx
+; X64-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: andl %edi, %eax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = shl i32 -1, %numhighbits
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @clear_lowbits64_ic0(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_ic0:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl $64, %ecx
+; X86-NOBMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB31_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB31_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_ic0:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl $64, %ecx
+; X86-BMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB31_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB31_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_ic0:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $64, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_ic0:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movq $-1, %rcx
+; X64-BMI2-NEXT: shlxq %rax, %rcx, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = shl i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_ic1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_ic1_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movb $64, %cl
+; X86-NOBMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB32_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB32_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_ic1_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movb $64, %cl
+; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB32_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB32_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_ic1_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movb $64, %cl
+; X64-NOBMI2-NEXT: subb %sil, %cl
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_ic1_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movb $64, %al
+; X64-BMI2-NEXT: subb %sil, %al
+; X64-BMI2-NEXT: movq $-1, %rcx
+; X64-BMI2-NEXT: shlxq %rax, %rcx, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = shl i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_ic2_load(i64* %w, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_ic2_load:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movl $64, %ecx
+; X86-NOBMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB33_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB33_2:
+; X86-NOBMI2-NEXT: andl 4(%esi), %edx
+; X86-NOBMI2-NEXT: andl (%esi), %eax
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_ic2_load:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movl $64, %ecx
+; X86-BMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB33_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB33_2:
+; X86-BMI2-NEXT: andl 4(%esi), %edx
+; X86-BMI2-NEXT: andl (%esi), %eax
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_ic2_load:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $64, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq (%rdi), %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_ic2_load:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movq $-1, %rcx
+; X64-BMI2-NEXT: shlxq %rax, %rcx, %rax
+; X64-BMI2-NEXT: andq (%rdi), %rax
+; X64-BMI2-NEXT: retq
+ %val = load i64, i64* %w
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = shl i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_ic3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_ic3_load_indexzext:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movb $64, %cl
+; X86-NOBMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB34_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB34_2:
+; X86-NOBMI2-NEXT: andl 4(%esi), %edx
+; X86-NOBMI2-NEXT: andl (%esi), %eax
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_ic3_load_indexzext:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movb $64, %cl
+; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB34_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB34_2:
+; X86-BMI2-NEXT: andl 4(%esi), %edx
+; X86-BMI2-NEXT: andl (%esi), %eax
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_ic3_load_indexzext:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movb $64, %cl
+; X64-NOBMI2-NEXT: subb %sil, %cl
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq (%rdi), %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_ic3_load_indexzext:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movb $64, %al
+; X64-BMI2-NEXT: subb %sil, %al
+; X64-BMI2-NEXT: movq $-1, %rcx
+; X64-BMI2-NEXT: shlxq %rax, %rcx, %rax
+; X64-BMI2-NEXT: andq (%rdi), %rax
+; X64-BMI2-NEXT: retq
+ %val = load i64, i64* %w
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = shl i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @clear_lowbits64_ic4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: clear_lowbits64_ic4_commutative:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: movl $64, %ecx
+; X86-NOBMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI2-NEXT: movl $-1, %edx
+; X86-NOBMI2-NEXT: movl $-1, %eax
+; X86-NOBMI2-NEXT: shll %cl, %eax
+; X86-NOBMI2-NEXT: shldl %cl, %edx, %edx
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB35_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %eax, %edx
+; X86-NOBMI2-NEXT: xorl %eax, %eax
+; X86-NOBMI2-NEXT: .LBB35_2:
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: clear_lowbits64_ic4_commutative:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movl $64, %ecx
+; X86-BMI2-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: shldl %cl, %edx, %edx
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB35_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB35_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: clear_lowbits64_ic4_commutative:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: movl $64, %ecx
+; X64-NOBMI2-NEXT: subl %esi, %ecx
+; X64-NOBMI2-NEXT: movq $-1, %rax
+; X64-NOBMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rax
+; X64-NOBMI2-NEXT: andq %rdi, %rax
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: clear_lowbits64_ic4_commutative:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: movq $-1, %rcx
+; X64-BMI2-NEXT: shlxq %rax, %rcx, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: retq
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = shl i64 -1, %numhighbits
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Multi-use tests
+; ---------------------------------------------------------------------------- ;
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @oneuse32(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: oneuse32:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: subl $8, %esp
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %esi
+; X86-NOBMI2-NEXT: shll %cl, %esi
+; X86-NOBMI2-NEXT: movl %esi, (%esp)
+; X86-NOBMI2-NEXT: calll use32
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: movl %esi, %eax
+; X86-NOBMI2-NEXT: addl $8, %esp
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: oneuse32:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: subl $8, %esp
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BMI2-NEXT: movl $-1, %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %esi
+; X86-BMI2-NEXT: movl %esi, (%esp)
+; X86-BMI2-NEXT: calll use32
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movl %esi, %eax
+; X86-BMI2-NEXT: addl $8, %esp
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: oneuse32:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: pushq %rbp
+; X64-NOBMI2-NEXT: pushq %rbx
+; X64-NOBMI2-NEXT: pushq %rax
+; X64-NOBMI2-NEXT: movl %edi, %ebx
+; X64-NOBMI2-NEXT: movl $-1, %ebp
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shll %cl, %ebp
+; X64-NOBMI2-NEXT: movl %ebp, %edi
+; X64-NOBMI2-NEXT: callq use32
+; X64-NOBMI2-NEXT: andl %ebx, %ebp
+; X64-NOBMI2-NEXT: movl %ebp, %eax
+; X64-NOBMI2-NEXT: addq $8, %rsp
+; X64-NOBMI2-NEXT: popq %rbx
+; X64-NOBMI2-NEXT: popq %rbp
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: oneuse32:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %rbp
+; X64-BMI2-NEXT: pushq %rbx
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movl %edi, %ebx
+; X64-BMI2-NEXT: movl $-1, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %ebp
+; X64-BMI2-NEXT: movl %ebp, %edi
+; X64-BMI2-NEXT: callq use32
+; X64-BMI2-NEXT: andl %ebx, %ebp
+; X64-BMI2-NEXT: movl %ebp, %eax
+; X64-BMI2-NEXT: addq $8, %rsp
+; X64-BMI2-NEXT: popq %rbx
+; X64-BMI2-NEXT: popq %rbp
+; X64-BMI2-NEXT: retq
+ %mask = shl i32 -1, %numlowbits
+ call void @use32(i32 %mask)
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i64 @oneuse64(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI2-LABEL: oneuse64:
+; X86-NOBMI2: # %bb.0:
+; X86-NOBMI2-NEXT: pushl %edi
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: pushl %eax
+; X86-NOBMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI2-NEXT: movl $-1, %esi
+; X86-NOBMI2-NEXT: movl $-1, %edi
+; X86-NOBMI2-NEXT: shll %cl, %edi
+; X86-NOBMI2-NEXT: shldl %cl, %esi, %esi
+; X86-NOBMI2-NEXT: testb $32, %cl
+; X86-NOBMI2-NEXT: je .LBB37_2
+; X86-NOBMI2-NEXT: # %bb.1:
+; X86-NOBMI2-NEXT: movl %edi, %esi
+; X86-NOBMI2-NEXT: xorl %edi, %edi
+; X86-NOBMI2-NEXT: .LBB37_2:
+; X86-NOBMI2-NEXT: subl $8, %esp
+; X86-NOBMI2-NEXT: pushl %esi
+; X86-NOBMI2-NEXT: pushl %edi
+; X86-NOBMI2-NEXT: calll use64
+; X86-NOBMI2-NEXT: addl $16, %esp
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI2-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI2-NEXT: movl %edi, %eax
+; X86-NOBMI2-NEXT: movl %esi, %edx
+; X86-NOBMI2-NEXT: addl $4, %esp
+; X86-NOBMI2-NEXT: popl %esi
+; X86-NOBMI2-NEXT: popl %edi
+; X86-NOBMI2-NEXT: retl
+;
+; X86-BMI2-LABEL: oneuse64:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %edi
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: pushl %eax
+; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-BMI2-NEXT: movl $-1, %esi
+; X86-BMI2-NEXT: shlxl %ecx, %esi, %edi
+; X86-BMI2-NEXT: shldl %cl, %esi, %esi
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB37_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %edi, %esi
+; X86-BMI2-NEXT: xorl %edi, %edi
+; X86-BMI2-NEXT: .LBB37_2:
+; X86-BMI2-NEXT: subl $8, %esp
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: pushl %edi
+; X86-BMI2-NEXT: calll use64
+; X86-BMI2-NEXT: addl $16, %esp
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-BMI2-NEXT: movl %edi, %eax
+; X86-BMI2-NEXT: movl %esi, %edx
+; X86-BMI2-NEXT: addl $4, %esp
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: popl %edi
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI2-LABEL: oneuse64:
+; X64-NOBMI2: # %bb.0:
+; X64-NOBMI2-NEXT: pushq %r14
+; X64-NOBMI2-NEXT: pushq %rbx
+; X64-NOBMI2-NEXT: pushq %rax
+; X64-NOBMI2-NEXT: movq %rdi, %r14
+; X64-NOBMI2-NEXT: movq $-1, %rbx
+; X64-NOBMI2-NEXT: movl %esi, %ecx
+; X64-NOBMI2-NEXT: shlq %cl, %rbx
+; X64-NOBMI2-NEXT: movq %rbx, %rdi
+; X64-NOBMI2-NEXT: callq use64
+; X64-NOBMI2-NEXT: andq %r14, %rbx
+; X64-NOBMI2-NEXT: movq %rbx, %rax
+; X64-NOBMI2-NEXT: addq $8, %rsp
+; X64-NOBMI2-NEXT: popq %rbx
+; X64-NOBMI2-NEXT: popq %r14
+; X64-NOBMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: oneuse64:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %r14
+; X64-BMI2-NEXT: pushq %rbx
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movq %rdi, %r14
+; X64-BMI2-NEXT: movq $-1, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rbx
+; X64-BMI2-NEXT: movq %rbx, %rdi
+; X64-BMI2-NEXT: callq use64
+; X64-BMI2-NEXT: andq %r14, %rbx
+; X64-BMI2-NEXT: movq %rbx, %rax
+; X64-BMI2-NEXT: addq $8, %rsp
+; X64-BMI2-NEXT: popq %rbx
+; X64-BMI2-NEXT: popq %r14
+; X64-BMI2-NEXT: retq
+ %mask = shl i64 -1, %numlowbits
+ call void @use64(i64 %mask)
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}