From 6d2a78996bee74611dad55b6c42b828ce1ee0953 Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin
Date: Thu, 17 Dec 2020 10:36:52 +0000
Subject: [PATCH] [SVE][CodeGen] Add bfloat16 support to scalable masked gather

Reviewed By: david-arm

Differential Revision: https://reviews.llvm.org/D93307
---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp    |  4 +++-
 llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td     |  4 ++++
 .../AArch64/sve-masked-gather-32b-signed-scaled.ll | 23 ++++++++++++++++++
 .../sve-masked-gather-32b-signed-unscaled.ll       | 25 ++++++++++++++++++++
 .../sve-masked-gather-32b-unsigned-scaled.ll       | 25 ++++++++++++++++++++
 .../sve-masked-gather-32b-unsigned-unscaled.ll     | 27 ++++++++++++++++++++++
 .../AArch64/sve-masked-gather-64b-scaled.ll        | 12 ++++++++++
 .../AArch64/sve-masked-gather-64b-unscaled.ll      | 13 +++++++++++
 .../CodeGen/AArch64/sve-masked-scatter-legalise.ll | 12 ++++++++++
 9 files changed, 144 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e4d1b51..9eeacc8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1151,8 +1151,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
     }
 
-    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16})
+    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
+      setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
+    }
 
     setOperationAction(ISD::SPLAT_VECTOR, MVT::nxv8bf16, Custom);
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index adbace2..fbe2446 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1196,6 +1196,10 @@ let Predicates = [HasSVE] in {
             (UUNPKLO_ZZ_D ZPR:$Zs)>;
   def : Pat<(nxv2bf16 (extract_subvector (nxv4bf16 ZPR:$Zs), (i64 2))),
             (UUNPKHI_ZZ_D ZPR:$Zs)>;
+  def : Pat<(nxv4bf16 (extract_subvector (nxv8bf16 ZPR:$Zs), (i64 0))),
+            (UUNPKLO_ZZ_S ZPR:$Zs)>;
+  def : Pat<(nxv4bf16 (extract_subvector (nxv8bf16 ZPR:$Zs), (i64 4))),
+            (UUNPKHI_ZZ_S ZPR:$Zs)>;
   }
 
   def : Pat<(nxv4f16 (extract_subvector (nxv8f16 ZPR:$Zs), (i64 0))),
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
index e6b89b0..25d0a47 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
@@ -48,6 +48,16 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -125,6 +135,16 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(half* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -150,10 +170,13 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
index 2d4ce50..b9bf904 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
@@ -63,6 +63,17 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -169,6 +180,17 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -208,6 +230,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
@@ -215,4 +238,6 @@ declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
index 41f1eb4..c7f8a76 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
@@ -52,6 +52,17 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -136,6 +147,17 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(half* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -163,10 +185,13 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
index 51ab73c..fe7290f 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
@@ -68,6 +68,18 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -183,6 +195,18 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -225,6 +249,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
@@ -232,4 +257,6 @@ declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
index 15dfcc6..c594f2c 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
@@ -44,6 +44,16 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -90,5 +100,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
index 3320b88..beb5bf3 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
@@ -59,6 +59,17 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -121,5 +132,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
index c3746a6..9cb642f 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
@@ -25,6 +25,16 @@ define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, i16* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) {
   ret void
 }
 
+define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, bfloat* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
+; CHECK-LABEL: masked_scatter_nxv8bf16
+; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
+; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
+; CHECK: ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 8 x i16> %offsets
+  call void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x bfloat*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
+  ret void
+}
+
 define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) {
 ; CHECK-LABEL: masked_scatter_nxv8f32
 ; CHECK-DAG: st1w { z0.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, uxtw #2]
@@ -56,4 +66,6 @@ define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, i32* %base, <vscale x 32 x i32> %offsets, <vscale x 32 x i1> %mask) {
 declare void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8*>, i32, <vscale x 16 x i1>)
 declare void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float*>, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32*>, i32, <vscale x 32 x i1>)
+attributes #0 = { "target-features"="+sve,+bf16" }
-- 
2.7.4