--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -global-isel=0 -mtriple=aarch64-unknown-linux-gnu -stop-after=aarch64-expand-pseudo -verify-machineinstrs | FileCheck %s
+
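+; Check that !pcsections metadata attached to memory intrinsic calls survives
+; lowering: the metadata is expected to show up on the emitted libcalls
+; (BL &memcpy, &memmove, &memset and the __llvm_*_element_unordered_atomic_*
+; helpers) as well as on the inline load/store expansions used for small,
+; known lengths.
+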
+define i64 @call_memcpy_intrinsic(ptr %src, ptr %dst, i64 %len) {
+ ; CHECK-LABEL: name: call_memcpy_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x19, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $lr, killed $x19, $sp, -2 :: (store (s64) into %stack.1), (store (s64) into %stack.0)
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w19, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -16
+ ; CHECK-NEXT: $x19 = ORRXrs $xzr, $x1, 0
+ ; CHECK-NEXT: BL &memcpy, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit-def $sp, implicit-def dead $x0, pcsections !0
+ ; CHECK-NEXT: renamable $x0 = LDRXui killed renamable $x19, 0 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: early-clobber $sp, $lr, $x19 = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.1), (load (s64) from %stack.0)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
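+; With a constant small length the copy is expanded inline; the metadata
+; should be carried by the resulting load/store instructions rather than by a
+; libcall.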
+define i64 @call_memcpy_intrinsic_sm(ptr %src, ptr %dst) {
+ ; CHECK-LABEL: name: call_memcpy_intrinsic_sm
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $w8 = LDRBBui renamable $x1, 0, pcsections !0 :: (volatile load (s8) from %ir.dst)
+ ; CHECK-NEXT: STRBBui killed renamable $w8, killed renamable $x0, 0, pcsections !0 :: (volatile store (s8) into %ir.src)
+ ; CHECK-NEXT: renamable $x0 = LDRXui killed renamable $x1, 0 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memcpy_inline_intrinsic(ptr %src, ptr %dst) {
+ ; CHECK-LABEL: name: call_memcpy_inline_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $w8 = LDRBBui renamable $x1, 0, pcsections !0 :: (volatile load (s8) from %ir.dst)
+ ; CHECK-NEXT: STRBBui killed renamable $w8, killed renamable $x0, 0, pcsections !0 :: (volatile store (s8) into %ir.src)
+ ; CHECK-NEXT: renamable $x0 = LDRXui killed renamable $x1, 0 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ call void @llvm.memcpy.inline.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memmove_intrinsic(ptr %src, ptr %dst, i64 %len) {
+ ; CHECK-LABEL: name: call_memmove_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x19, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $lr, killed $x19, $sp, -2 :: (store (s64) into %stack.1), (store (s64) into %stack.0)
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w19, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -16
+ ; CHECK-NEXT: $x19 = ORRXrs $xzr, $x1, 0
+ ; CHECK-NEXT: BL &memmove, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit-def $sp, implicit-def dead $x0, pcsections !0
+ ; CHECK-NEXT: renamable $x0 = LDRXui killed renamable $x19, 0 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: early-clobber $sp, $lr, $x19 = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.1), (load (s64) from %stack.0)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ call void @llvm.memmove.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memset_intrinsic(ptr %dst, i64 %len) {
+ ; CHECK-LABEL: name: call_memset_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $x0, $x1, $x19, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $lr, killed $x19, $sp, -2 :: (store (s64) into %stack.1), (store (s64) into %stack.0)
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w19, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -16
+ ; CHECK-NEXT: $x2 = ORRXrs $xzr, $x1, 0
+ ; CHECK-NEXT: $x19 = ORRXrs $xzr, $x0, 0
+ ; CHECK-NEXT: $w1 = ORRWrs $wzr, $wzr, 0
+ ; CHECK-NEXT: BL &memset, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2, implicit-def $sp, implicit-def dead $x0, pcsections !0
+ ; CHECK-NEXT: renamable $x0 = LDRXui killed renamable $x19, 0 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: early-clobber $sp, $lr, $x19 = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.1), (load (s64) from %stack.0)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ call void @llvm.memset.p0.i64(ptr %dst, i8 0, i64 %len, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memset_inline_intrinsic(ptr %dst) {
+ ; CHECK-LABEL: name: call_memset_inline_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: STRBBui $wzr, renamable $x0, 0, pcsections !0 :: (volatile store (s8) into %ir.dst)
+ ; CHECK-NEXT: renamable $x0 = LDRXui killed renamable $x0, 0 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ call void @llvm.memset.inline.p0.i64(ptr %dst, i8 0, i64 1, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
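+; The element-wise atomic variants lower to calls to the
+; __llvm_mem*_element_unordered_atomic_<element_size> library functions; the
+; metadata should be attached to those calls as well.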
+define i64 @call_memcpy_element_unordered_atomic_intrinsic() {
+ ; CHECK-LABEL: name: call_memcpy_element_unordered_atomic_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $lr, $sp, -16 :: (store (s64) into %stack.2)
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -16
+ ; CHECK-NEXT: $x0 = ADDXri $sp, 12, 0
+ ; CHECK-NEXT: $x1 = ADDXri $sp, 8, 0
+ ; CHECK-NEXT: dead $w2 = MOVZWi 1, 0, implicit-def $x2
+ ; CHECK-NEXT: BL &__llvm_memcpy_element_unordered_atomic_1, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit killed $x1, implicit killed $x2, implicit-def $sp, pcsections !0
+ ; CHECK-NEXT: renamable $x0 = LDRXui $sp, 1 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ %src = alloca i32, align 1
+ %dst = alloca i32, align 1
+ call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memmove_element_unordered_atomic_intrinsic() {
+ ; CHECK-LABEL: name: call_memmove_element_unordered_atomic_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $lr, $sp, -16 :: (store (s64) into %stack.2)
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -16
+ ; CHECK-NEXT: $x0 = ADDXri $sp, 12, 0
+ ; CHECK-NEXT: $x1 = ADDXri $sp, 8, 0
+ ; CHECK-NEXT: dead $w2 = MOVZWi 1, 0, implicit-def $x2
+ ; CHECK-NEXT: BL &__llvm_memmove_element_unordered_atomic_1, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit killed $x1, implicit killed $x2, implicit-def $sp, pcsections !0
+ ; CHECK-NEXT: renamable $x0 = LDRXui $sp, 1 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ %src = alloca i32, align 1
+ %dst = alloca i32, align 1
+ call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memset_element_unordered_atomic_intrinsic() {
+ ; CHECK-LABEL: name: call_memset_element_unordered_atomic_intrinsic
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $lr, $sp, -16 :: (store (s64) into %stack.1)
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -16
+ ; CHECK-NEXT: $x0 = ADDXri $sp, 12, 0
+ ; CHECK-NEXT: $w1 = ORRWrs $wzr, $wzr, 0
+ ; CHECK-NEXT: dead $w2 = MOVZWi 1, 0, implicit-def $x2
+ ; CHECK-NEXT: BL &__llvm_memset_element_unordered_atomic_1, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit killed $w1, implicit killed $x2, implicit-def $sp, pcsections !0
+ ; CHECK-NEXT: renamable $x0 = LDURXi $sp, 12 :: (load (s64) from %ir.dst)
+ ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ %dst = alloca i32, align 1
+ call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %dst, i8 0, i64 1, i32 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+!0 = !{!"foo"}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture writeonly, i8, i64, i32)
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -global-isel=0 -mtriple=x86_64-unknown-linux-gnu -stop-after=x86-pseudo -verify-machineinstrs | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -global-isel=0 -mtriple=i686-unknown-linux-gnu -stop-after=x86-pseudo -verify-machineinstrs | FileCheck %s -check-prefix=X32
+
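+; Check that !pcsections metadata attached to memory intrinsic calls survives
+; SelectionDAG lowering on both x86-64 and i686: the metadata is expected to
+; show up on the emitted libcalls (CALL64pcrel32/CALLpcrel32 to memcpy,
+; memmove, memset and the __llvm_*_element_unordered_atomic_* helpers) as well
+; as on the inline load/store expansions used for small, known lengths.
+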
+define i64 @call_memcpy_intrinsic(ptr %src, ptr %dst, i64 %len) {
+ ; X64-LABEL: name: call_memcpy_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: liveins: $rdi, $rdx, $rsi, $rbx
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X64-NEXT: CFI_INSTRUCTION offset $rbx, -16
+ ; X64-NEXT: $rbx = MOV64rr $rsi
+ ; X64-NEXT: CALL64pcrel32 target-flags(x86-plt) &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax, pcsections !0
+ ; X64-NEXT: renamable $rax = MOV64rm killed renamable $rbx, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memcpy_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: liveins: $esi
+ ; X32-NEXT: {{ $}}
+ ; X32-NEXT: frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; X32-NEXT: $esp = frame-setup SUB32ri8 $esp, 8, implicit-def dead $eflags
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X32-NEXT: CFI_INSTRUCTION offset $esi, -8
+ ; X32-NEXT: renamable $esi = MOV32rm $esp, 1, $noreg, 20, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.3)
+ ; X32-NEXT: $esp = SUB32ri8 $esp, 4, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.2, align 8), (store (s32) into stack + 8)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r renamable $esi, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.4, align 16), (store (s32) into stack)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: CALLpcrel32 &memcpy, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def dead $eax, pcsections !0
+ ; X32-NEXT: $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset -16
+ ; X32-NEXT: renamable $eax = MOV32rm renamable $esi, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm killed renamable $esi, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: $esp = frame-destroy ADD32ri8 $esp, 8, implicit-def dead $eflags
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X32-NEXT: $esi = frame-destroy POP32r implicit-def $esp, implicit $esp
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+ ; X32-NEXT: RET32 $eax, $edx
+ call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
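+; With a constant small length the copy is expanded inline; the metadata
+; should land on the resulting load/store instructions rather than on a
+; libcall.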
+define i64 @call_memcpy_intrinsic_sm(ptr %src, ptr %dst) {
+ ; X64-LABEL: name: call_memcpy_intrinsic_sm
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: liveins: $rdi, $rsi
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: $eax = MOVZX32rm8 renamable $rsi, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+ ; X64-NEXT: MOV8mr killed renamable $rdi, 1, $noreg, 0, $noreg, killed renamable $al, pcsections !0 :: (volatile store (s8) into %ir.src)
+ ; X64-NEXT: renamable $rax = MOV64rm killed renamable $rsi, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memcpy_intrinsic_sm
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.1, align 16)
+ ; X32-NEXT: renamable $ecx = MOV32rm $esp, 1, $noreg, 8, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.0)
+ ; X32-NEXT: $edx = MOVZX32rm8 renamable $ecx, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+ ; X32-NEXT: MOV8mr killed renamable $eax, 1, $noreg, 0, $noreg, killed renamable $dl, pcsections !0 :: (volatile store (s8) into %ir.src)
+ ; X32-NEXT: renamable $eax = MOV32rm renamable $ecx, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm killed renamable $ecx, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: RET32 $eax, $edx
+ call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memcpy_inline_intrinsic(ptr %src, ptr %dst) {
+ ; X64-LABEL: name: call_memcpy_inline_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: liveins: $rdi, $rsi
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: $eax = MOVZX32rm8 renamable $rsi, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+ ; X64-NEXT: MOV8mr killed renamable $rdi, 1, $noreg, 0, $noreg, killed renamable $al, pcsections !0 :: (volatile store (s8) into %ir.src)
+ ; X64-NEXT: renamable $rax = MOV64rm killed renamable $rsi, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memcpy_inline_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.1, align 16)
+ ; X32-NEXT: renamable $ecx = MOV32rm $esp, 1, $noreg, 8, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.0)
+ ; X32-NEXT: $edx = MOVZX32rm8 renamable $ecx, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+ ; X32-NEXT: MOV8mr killed renamable $eax, 1, $noreg, 0, $noreg, killed renamable $dl, pcsections !0 :: (volatile store (s8) into %ir.src)
+ ; X32-NEXT: renamable $eax = MOV32rm renamable $ecx, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm killed renamable $ecx, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: RET32 $eax, $edx
+ call void @llvm.memcpy.inline.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memmove_intrinsic(ptr %src, ptr %dst, i64 %len) {
+ ; X64-LABEL: name: call_memmove_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: liveins: $rdi, $rdx, $rsi, $rbx
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X64-NEXT: CFI_INSTRUCTION offset $rbx, -16
+ ; X64-NEXT: $rbx = MOV64rr $rsi
+ ; X64-NEXT: CALL64pcrel32 target-flags(x86-plt) &memmove, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax, pcsections !0
+ ; X64-NEXT: renamable $rax = MOV64rm killed renamable $rbx, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memmove_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: liveins: $esi
+ ; X32-NEXT: {{ $}}
+ ; X32-NEXT: frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; X32-NEXT: $esp = frame-setup SUB32ri8 $esp, 8, implicit-def dead $eflags
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X32-NEXT: CFI_INSTRUCTION offset $esi, -8
+ ; X32-NEXT: renamable $esi = MOV32rm $esp, 1, $noreg, 20, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.3)
+ ; X32-NEXT: $esp = SUB32ri8 $esp, 4, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.2, align 8), (store (s32) into stack + 8)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r renamable $esi, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.4, align 16), (store (s32) into stack)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: CALLpcrel32 &memmove, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def dead $eax, pcsections !0
+ ; X32-NEXT: $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset -16
+ ; X32-NEXT: renamable $eax = MOV32rm renamable $esi, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm killed renamable $esi, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: $esp = frame-destroy ADD32ri8 $esp, 8, implicit-def dead $eflags
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X32-NEXT: $esi = frame-destroy POP32r implicit-def $esp, implicit $esp
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+ ; X32-NEXT: RET32 $eax, $edx
+ call void @llvm.memmove.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memset_intrinsic(ptr %dst, i64 %len) {
+ ; X64-LABEL: name: call_memset_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: liveins: $rdi, $rsi, $rbx
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X64-NEXT: CFI_INSTRUCTION offset $rbx, -16
+ ; X64-NEXT: $rdx = MOV64rr $rsi
+ ; X64-NEXT: $rbx = MOV64rr $rdi
+ ; X64-NEXT: $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags, pcsections !0
+ ; X64-NEXT: CALL64pcrel32 target-flags(x86-plt) &memset, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax, pcsections !0
+ ; X64-NEXT: renamable $rax = MOV64rm killed renamable $rbx, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memset_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: liveins: $esi
+ ; X32-NEXT: {{ $}}
+ ; X32-NEXT: frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; X32-NEXT: $esp = frame-setup SUB32ri8 $esp, 8, implicit-def dead $eflags
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X32-NEXT: CFI_INSTRUCTION offset $esi, -8
+ ; X32-NEXT: renamable $esi = MOV32rm $esp, 1, $noreg, 16, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.3, align 16)
+ ; X32-NEXT: $esp = SUB32ri8 $esp, 4, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32rmm $esp, 1, $noreg, 24, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.2), (store (s32) into stack + 8)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r renamable $esi, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: CALLpcrel32 &memset, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def dead $eax, pcsections !0
+ ; X32-NEXT: $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset -16
+ ; X32-NEXT: renamable $eax = MOV32rm renamable $esi, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm killed renamable $esi, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: $esp = frame-destroy ADD32ri8 $esp, 8, implicit-def dead $eflags
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X32-NEXT: $esi = frame-destroy POP32r implicit-def $esp, implicit $esp
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+ ; X32-NEXT: RET32 $eax, $edx
+ call void @llvm.memset.p0.i64(ptr %dst, i8 0, i64 %len, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memset_inline_intrinsic(ptr %dst) {
+ ; X64-LABEL: name: call_memset_inline_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: liveins: $rdi
+ ; X64-NEXT: {{ $}}
+ ; X64-NEXT: MOV8mi renamable $rdi, 1, $noreg, 0, $noreg, 0, pcsections !0 :: (volatile store (s8) into %ir.dst)
+ ; X64-NEXT: renamable $rax = MOV64rm killed renamable $rdi, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memset_inline_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.0, align 16)
+ ; X32-NEXT: MOV8mi renamable $eax, 1, $noreg, 0, $noreg, 0, pcsections !0 :: (volatile store (s8) into %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm renamable $eax, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: renamable $eax = MOV32rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: RET32 $eax, $edx
+ call void @llvm.memset.inline.p0.i64(ptr %dst, i8 0, i64 1, i1 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
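+; The element-wise atomic variants lower to calls to the
+; __llvm_mem*_element_unordered_atomic_<element_size> library functions; the
+; metadata should be attached to those calls as well.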
+define i64 @call_memcpy_element_unordered_atomic_intrinsic() {
+ ; X64-LABEL: name: call_memcpy_element_unordered_atomic_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X64-NEXT: renamable $rdi = LEA64r $rsp, 1, $noreg, 4, $noreg, pcsections !0
+ ; X64-NEXT: $rsi = MOV64rr $rsp
+ ; X64-NEXT: $edx = MOV32ri 1, implicit-def $rdx, pcsections !0
+ ; X64-NEXT: CALL64pcrel32 target-flags(x86-plt) &__llvm_memcpy_element_unordered_atomic_1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit killed $rdx, implicit-def $rsp, implicit-def $ssp, pcsections !0
+ ; X64-NEXT: renamable $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memcpy_element_unordered_atomic_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X32-NEXT: renamable $eax = LEA32r $esp, 1, $noreg, 4, $noreg, pcsections !0
+ ; X32-NEXT: renamable $ecx = LEA32r $esp, 1, $noreg, 8, $noreg, pcsections !0
+ ; X32-NEXT: PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 12)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32i8 1, implicit-def $esp, implicit $esp :: (store (s32) into stack + 8)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r killed renamable $eax, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r killed renamable $ecx, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: CALLpcrel32 &__llvm_memcpy_element_unordered_atomic_1, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, pcsections !0
+ ; X32-NEXT: $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset -16
+ ; X32-NEXT: renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: $esp = frame-destroy ADD32ri8 $esp, 12, implicit-def dead $eflags
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+ ; X32-NEXT: RET32 $eax, $edx
+ %src = alloca i32, align 1
+ %dst = alloca i32, align 1
+ call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memmove_element_unordered_atomic_intrinsic() {
+ ; X64-LABEL: name: call_memmove_element_unordered_atomic_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X64-NEXT: renamable $rdi = LEA64r $rsp, 1, $noreg, 4, $noreg, pcsections !0
+ ; X64-NEXT: $rsi = MOV64rr $rsp
+ ; X64-NEXT: $edx = MOV32ri 1, implicit-def $rdx, pcsections !0
+ ; X64-NEXT: CALL64pcrel32 target-flags(x86-plt) &__llvm_memmove_element_unordered_atomic_1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit killed $rdx, implicit-def $rsp, implicit-def $ssp, pcsections !0
+ ; X64-NEXT: renamable $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memmove_element_unordered_atomic_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X32-NEXT: renamable $eax = LEA32r $esp, 1, $noreg, 4, $noreg, pcsections !0
+ ; X32-NEXT: renamable $ecx = LEA32r $esp, 1, $noreg, 8, $noreg, pcsections !0
+ ; X32-NEXT: PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 12)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32i8 1, implicit-def $esp, implicit $esp :: (store (s32) into stack + 8)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r killed renamable $eax, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r killed renamable $ecx, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: CALLpcrel32 &__llvm_memmove_element_unordered_atomic_1, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, pcsections !0
+ ; X32-NEXT: $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset -16
+ ; X32-NEXT: renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: $esp = frame-destroy ADD32ri8 $esp, 12, implicit-def dead $eflags
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+ ; X32-NEXT: RET32 $eax, $edx
+ %src = alloca i32, align 1
+ %dst = alloca i32, align 1
+ call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+define i64 @call_memset_element_unordered_atomic_intrinsic() {
+ ; X64-LABEL: name: call_memset_element_unordered_atomic_intrinsic
+ ; X64: bb.0 (%ir-block.0):
+ ; X64-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X64-NEXT: renamable $rdi = LEA64r $rsp, 1, $noreg, 4, $noreg, pcsections !0
+ ; X64-NEXT: $edx = MOV32ri 1, implicit-def $rdx, pcsections !0
+ ; X64-NEXT: $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags, pcsections !0
+ ; X64-NEXT: CALL64pcrel32 target-flags(x86-plt) &__llvm_memset_element_unordered_atomic_1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit killed $rdx, implicit-def $rsp, implicit-def $ssp, pcsections !0
+ ; X64-NEXT: renamable $rax = MOV64rm $rsp, 1, $noreg, 4, $noreg :: (load (s64) from %ir.dst)
+ ; X64-NEXT: $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+ ; X64-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+ ; X64-NEXT: RET64 $rax
+ ; X32-LABEL: name: call_memset_element_unordered_atomic_intrinsic
+ ; X32: bb.0 (%ir-block.0):
+ ; X32-NEXT: $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+ ; X32-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; X32-NEXT: renamable $eax = LEA32r $esp, 1, $noreg, 8, $noreg, pcsections !0
+ ; X32-NEXT: PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 12)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32i8 1, implicit-def $esp, implicit $esp :: (store (s32) into stack + 8)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: PUSH32r killed renamable $eax, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset 4
+ ; X32-NEXT: CALLpcrel32 &__llvm_memset_element_unordered_atomic_1, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, pcsections !0
+ ; X32-NEXT: $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+ ; X32-NEXT: CFI_INSTRUCTION adjust_cfa_offset -16
+ ; X32-NEXT: renamable $eax = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load (s32) from %ir.dst)
+ ; X32-NEXT: renamable $edx = MOV32rm $esp, 1, $noreg, 12, $noreg :: (load (s32) from %ir.dst + 4)
+ ; X32-NEXT: $esp = frame-destroy ADD32ri8 $esp, 12, implicit-def dead $eflags
+ ; X32-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+ ; X32-NEXT: RET32 $eax, $edx
+ %dst = alloca i32, align 1
+ call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %dst, i8 0, i64 1, i32 1), !pcsections !0
+ %val = load i64, ptr %dst
+ ret i64 %val
+}
+
+!0 = !{!"foo"}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture writeonly, i8, i64, i32)