From 76790cf1db53c35384a6c8c7400c03e5f028f9bc Mon Sep 17 00:00:00 2001
From: Caroline Concatto
Date: Thu, 26 Jan 2023 16:50:25 +0000
Subject: [PATCH] [AArch64][SME2] Add Multi-vector add vector intrinsics

Add the following intrinsic:
  ADD vectors

NOTE: These intrinsics are still in development and are subject to future
changes.

Reviewed By: david-arm

Differential Revision: https://reviews.llvm.org/D142455
---
 llvm/include/llvm/IR/IntrinsicsAArch64.td        |   6 +
 llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp  |  14 ++
 llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll | 155 +++++++++++++++++++++++
 3 files changed, 175 insertions(+)

diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 89b49cb..da7f575 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3132,4 +3132,10 @@ let TargetPrefix = "aarch64" in {
 
   def int_aarch64_sme_write_vg1x2 : SME2_ZA_ArrayVector_Write_VG2_Intrinsic;
   def int_aarch64_sme_write_vg1x4 : SME2_ZA_ArrayVector_Write_VG4_Intrinsic;
+
+  //
+  // Multi-Single Vector add
+  //
+  def int_aarch64_sve_add_single_x2 : SME2_VG2_Multi_Single_Intrinsic;
+  def int_aarch64_sve_add_single_x4 : SME2_VG4_Multi_Single_Intrinsic;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index c1f2c11..3ebcc24 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5061,6 +5061,20 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
                                          AArch64::FCLAMP_VG4_4Z4Z_D}))
         SelectClamp(Node, 4, Op);
       return;
+    case Intrinsic::aarch64_sve_add_single_x2:
+      if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
+              Node->getValueType(0),
+              {AArch64::ADD_VG2_2ZZ_B, AArch64::ADD_VG2_2ZZ_H,
+               AArch64::ADD_VG2_2ZZ_S, AArch64::ADD_VG2_2ZZ_D}))
+        SelectDestructiveMultiIntrinsic(Node, 2, false, Op);
+      return;
+    case Intrinsic::aarch64_sve_add_single_x4:
+      if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
+              Node->getValueType(0),
+              {AArch64::ADD_VG4_4ZZ_B, AArch64::ADD_VG4_4ZZ_H,
+               AArch64::ADD_VG4_4ZZ_S, AArch64::ADD_VG4_4ZZ_D}))
+        SelectDestructiveMultiIntrinsic(Node, 4, false, Op);
+      return;
     }
     break;
   }
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
index 6b408d8..f25e8b7 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
@@ -370,6 +370,153 @@ define void @multi_vector_add_za_vg1x4_f64(i32 %slice, <vscale x 2 x double> %zn
   ret void
 }
 
+;
+; ADD Vectors Multi-Single x2
+;
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x2_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.b, z5.b }, { z4.b, z5.b }, z3.b
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> }
+              @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2,
+                                                      <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_single_x2_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.h, z5.h }, { z4.h, z5.h }, z3.h
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> }
+              @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2,
+                                                      <vscale x 8 x i16> %zm)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_single_x2_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.s, z5.s }, { z4.s, z5.s }, z3.s
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
+              @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
+                                                      <vscale x 4 x i32> %zm)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_single_x2_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.d, z5.d }, { z4.d, z5.d }, z3.d
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> }
+              @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2,
+                                                      <vscale x 2 x i64> %zm)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+;
+; ADD Vectors Multi-Single x4
+;
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x4_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x4_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.b - z27.b }, { z24.b - z27.b }, z5.b
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
+              @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2,
+                                                      <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4,
+                                                      <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_x4_single_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: multi_vec_add_x4_single_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.h - z27.h }, { z24.h - z27.h }, z5.h
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }
+              @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2,
+                                                      <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4,
+                                                      <vscale x 8 x i16> %zm)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_x4_single_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vec_add_x4_single_s32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.s - z27.s }, { z24.s - z27.s }, z5.s
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
+              @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
+                                                      <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
+                                                      <vscale x 4 x i32> %zm)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_x4_single_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vec_add_x4_single_s64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.d - z27.d }, { z24.d - z27.d }, z5.d
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }
+              @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2,
+                                                      <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4,
+                                                      <vscale x 2 x i64> %zm)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
 declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
 declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
@@ -386,3 +533,11 @@ declare void @llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32, <vscale x 4 x float>,
 declare void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>)
 declare void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-- 
2.7.4
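
A minimal usage sketch of the new x2 intrinsic, separate from the patch above. The function name @add_x2_sketch and the llc invocation are illustrative assumptions, not taken from the patch. Because the two inputs and the two results share the same tuple registers here, selection via SelectDestructiveMultiIntrinsic should need no extra register moves and is expected to emit a single destructive multi-vector add.

; Illustrative sketch; compile with something like:
;   llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -o - add_x2_sketch.ll
; Expected (approximate) body: add { z0.s, z1.s }, { z0.s, z1.s }, z2.s
define { <vscale x 4 x i32>, <vscale x 4 x i32> } @add_x2_sketch(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) {
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
              @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
                                                      <vscale x 4 x i32> %zm)
  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}

declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)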