From 79c830147866fc1daad028b9b8f77c1ed60693a6 Mon Sep 17 00:00:00 2001 From: Ivan Kosarev Date: Thu, 15 Jun 2023 10:28:10 +0100 Subject: [PATCH] [AMDGPU][GFX11] Add test coverage for 16-bit conversions, part 5. Reviewed By: Joe_Nash Differential Revision: https://reviews.llvm.org/D152805 --- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.powi.ll | 281 ++++-- llvm/test/CodeGen/AMDGPU/half.ll | 1095 +++++++++++++++++++--- 2 files changed, 1162 insertions(+), 214 deletions(-) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.powi.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.powi.ll index 8ebcec4..db05403 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.powi.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.powi.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=hawaii < %s | FileCheck -check-prefixes=GCN,GFX7 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=hawaii < %s | FileCheck -check-prefixes=GFX78,GFX7 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GFX78,GFX8 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s define i16 @v_powi_f16(i16 %l, i32 %r) { ; GFX7-LABEL: v_powi_f16: @@ -26,6 +27,23 @@ define i16 @v_powi_f16(i16 %l, i32 %r) { ; GFX8-NEXT: v_cvt_f16_f32_e32 v0, v0 ; GFX8-NEXT: v_exp_f16_e32 v0, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f16_e32 v0, v0 +; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v1 +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, v0, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX11-NEXT: v_exp_f16_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %l.cast = bitcast i16 %l to half %res = call half @llvm.powi.f16.i32(half %l.cast, i32 %r) %res.cast = bitcast half %res to i16 @@ -33,129 +51,242 @@ define i16 @v_powi_f16(i16 %l, i32 %r) { } define float @v_powi_f32(float %l, i32 %r) { -; GCN-LABEL: v_powi_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_cvt_f32_i32_e32 v1, v1 -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, v0, v1 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_cvt_f32_i32_e32 v1, v1 +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, v0, v1 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, v0, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 
s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 %r) ret float %res } define float @v_powi_0_f32(float %l) { -; GCN-LABEL: v_powi_0_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, 1.0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_0_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_0_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 0) ret float %res } define float @v_powi_1_f32(float %l) { -; GCN-LABEL: v_powi_1_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_1_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_1_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 1) ret float %res } define float @v_powi_neg1_f32(float %l) { -; GCN-LABEL: v_powi_neg1_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, -1.0, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_neg1_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, -1.0, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_neg1_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, -1.0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 -1) ret float %res } define float @v_powi_2_f32(float %l) { -; GCN-LABEL: v_powi_2_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, 2.0, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_2_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, 2.0, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_2_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, 2.0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 2) ret float %res } define float @v_powi_neg2_f32(float %l) { -; GCN-LABEL: v_powi_neg2_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: 
v_mul_legacy_f32_e32 v0, -2.0, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_neg2_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, -2.0, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_neg2_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, -2.0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 -2) ret float %res } define float @v_powi_4_f32(float %l) { -; GCN-LABEL: v_powi_4_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, 4.0, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_4_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, 4.0, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_4_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, 4.0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 4) ret float %res } define float @v_powi_8_f32(float %l) { -; GCN-LABEL: v_powi_8_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, 0x41000000, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_8_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, 0x41000000, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_8_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, 0x41000000, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 8) ret float %res } define float @v_powi_16_f32(float %l) { -; GCN-LABEL: v_powi_16_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, 0x41800000, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_16_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, 0x41800000, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_16_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: 
s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, 0x41800000, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 16) ret float %res } define float @v_powi_128_f32(float %l) { -; GCN-LABEL: v_powi_128_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, 0x43000000, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_128_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, 0x43000000, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_128_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, 0x43000000, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 128) ret float %res } define float @v_powi_neg128_f32(float %l) { -; GCN-LABEL: v_powi_neg128_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_log_f32_e32 v0, v0 -; GCN-NEXT: v_mul_legacy_f32_e32 v0, 0xc3000000, v0 -; GCN-NEXT: v_exp_f32_e32 v0, v0 -; GCN-NEXT: s_setpc_b64 s[30:31] +; GFX78-LABEL: v_powi_neg128_f32: +; GFX78: ; %bb.0: +; GFX78-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX78-NEXT: v_log_f32_e32 v0, v0 +; GFX78-NEXT: v_mul_legacy_f32_e32 v0, 0xc3000000, v0 +; GFX78-NEXT: v_exp_f32_e32 v0, v0 +; GFX78-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_powi_neg128_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: v_log_f32_e32 v0, v0 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_dx9_zero_f32_e32 v0, 0xc3000000, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_exp_f32_e32 v0, v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] %res = call float @llvm.powi.f32.i32(float %l, i32 -128) ret float %res } diff --git a/llvm/test/CodeGen/AMDGPU/half.ll b/llvm/test/CodeGen/AMDGPU/half.ll index f682d35..72e19c8 100644 --- a/llvm/test/CodeGen/AMDGPU/half.ll +++ b/llvm/test/CodeGen/AMDGPU/half.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,CI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,VI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=CIVI,CI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=CIVI,VI %s +; RUN: llc 
-amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GFX11 %s ; half args should be promoted to float for CI and lower. @@ -26,6 +27,17 @@ define amdgpu_kernel void @load_f16_arg(ptr addrspace(1) %out, half %arg) #0 { ; VI-NEXT: v_mov_b32_e32 v2, s2 ; VI-NEXT: flat_store_short v[0:1], v2 ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: load_f16_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX11-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm store half %arg, ptr addrspace(1) %out ret void } @@ -52,26 +64,49 @@ define amdgpu_kernel void @load_v2f16_arg(ptr addrspace(1) %out, <2 x half> %arg ; VI-NEXT: v_mov_b32_e32 v2, s2 ; VI-NEXT: flat_store_dword v[0:1], v2 ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: load_v2f16_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm store <2 x half> %arg, ptr addrspace(1) %out ret void } define amdgpu_kernel void @load_v3f16_arg(ptr addrspace(1) %out, <3 x half> %arg) #0 { -; GCN-LABEL: load_v3f16_arg: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_add_u32 s4, s0, 4 -; GCN-NEXT: s_addc_u32 s5, s1, 0 -; GCN-NEXT: v_mov_b32_e32 v2, s4 -; GCN-NEXT: v_mov_b32_e32 v4, s3 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v3, s5 -; GCN-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NEXT: v_mov_b32_e32 v5, s2 -; GCN-NEXT: flat_store_short v[2:3], v4 -; GCN-NEXT: flat_store_dword v[0:1], v5 -; GCN-NEXT: s_endpgm +; CIVI-LABEL: load_v3f16_arg: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: s_add_u32 s4, s0, 4 +; CIVI-NEXT: s_addc_u32 s5, s1, 0 +; CIVI-NEXT: v_mov_b32_e32 v2, s4 +; CIVI-NEXT: v_mov_b32_e32 v4, s3 +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: v_mov_b32_e32 v3, s5 +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: v_mov_b32_e32 v5, s2 +; CIVI-NEXT: flat_store_short v[2:3], v4 +; CIVI-NEXT: flat_store_dword v[0:1], v5 +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: load_v3f16_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s3 +; GFX11-NEXT: v_mov_b32_e32 v2, s2 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b16 v0, v1, s[0:1] offset:4 +; GFX11-NEXT: global_store_b32 v0, v2, s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm store <3 x half> %arg, ptr addrspace(1) %out ret void } @@ -79,16 +114,26 @@ define amdgpu_kernel void @load_v3f16_arg(ptr addrspace(1) %out, <3 x half> %arg ; FIXME: Why not one load? 
define amdgpu_kernel void @load_v4f16_arg(ptr addrspace(1) %out, <4 x half> %arg) #0 { -; GCN-LABEL: load_v4f16_arg: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NEXT: v_mov_b32_e32 v3, s3 -; GCN-NEXT: flat_store_dwordx2 v[0:1], v[2:3] -; GCN-NEXT: s_endpgm +; CIVI-LABEL: load_v4f16_arg: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: v_mov_b32_e32 v2, s2 +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: v_mov_b32_e32 v3, s3 +; CIVI-NEXT: flat_store_dwordx2 v[0:1], v[2:3] +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: load_v4f16_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm store <4 x half> %arg, ptr addrspace(1) %out ret void } @@ -121,6 +166,19 @@ define amdgpu_kernel void @load_v8f16_arg(ptr addrspace(1) %out, <8 x half> %arg ; VI-NEXT: v_mov_b32_e32 v3, s3 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: load_v8f16_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x10 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s7 +; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm store <8 x half> %arg, ptr addrspace(1) %out ret void } @@ -151,6 +209,20 @@ define amdgpu_kernel void @extload_v2f16_arg(ptr addrspace(1) %out, <2 x half> % ; VI-NEXT: v_mov_b32_e32 v2, s0 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v2f16_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s3, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s3 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %fpext = fpext <2 x half> %in to <2 x float> store <2 x float> %fpext, ptr addrspace(1) %out ret void @@ -178,6 +250,18 @@ define amdgpu_kernel void @extload_f16_to_f32_arg(ptr addrspace(1) %out, half %a ; VI-NEXT: v_mov_b32_e32 v1, s1 ; VI-NEXT: flat_store_dword v[0:1], v2 ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_f16_to_f32_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s2 +; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext half %arg to float store float %ext, ptr addrspace(1) %out ret void @@ -209,6 +293,20 @@ define amdgpu_kernel void @extload_v2f16_to_v2f32_arg(ptr addrspace(1) %out, <2 ; VI-NEXT: v_mov_b32_e32 v2, s0 ; VI-NEXT: 
flat_store_dwordx2 v[2:3], v[0:1] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v2f16_to_v2f32_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s3, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s3 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <2 x half> %arg to <2 x float> store <2 x float> %ext, ptr addrspace(1) %out ret void @@ -240,6 +338,19 @@ define amdgpu_kernel void @extload_v3f16_to_v3f32_arg(ptr addrspace(1) %out, <3 ; VI-NEXT: v_mov_b32_e32 v3, s0 ; VI-NEXT: flat_store_dwordx3 v[3:4], v[0:2] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v3f16_to_v3f32_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s4, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, s3 +; GFX11-NEXT: global_store_b96 v3, v[0:2], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <3 x half> %arg to <3 x float> store <3 x float> %ext, ptr addrspace(1) %out ret void @@ -275,6 +386,21 @@ define amdgpu_kernel void @extload_v4f16_to_v4f32_arg(ptr addrspace(1) %out, <4 ; VI-NEXT: v_mov_b32_e32 v4, s0 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v4f16_to_v4f32_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s4, s3, 16 +; GFX11-NEXT: s_lshr_b32 s5, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, s4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, s3 +; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <4 x half> %arg to <4 x float> store <4 x float> %ext, ptr addrspace(1) %out ret void @@ -336,6 +462,31 @@ define amdgpu_kernel void @extload_v8f16_to_v8f32_arg(ptr addrspace(1) %out, <8 ; VI-NEXT: v_mov_b32_e32 v5, s5 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v8f16_to_v8f32_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x10 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v8, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s8, s7, 16 +; GFX11-NEXT: s_lshr_b32 s9, s6, 16 +; GFX11-NEXT: s_lshr_b32 s2, s5, 16 +; GFX11-NEXT: s_lshr_b32 s3, s4, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v6, s7 +; GFX11-NEXT: v_cvt_f32_f16_e32 v4, s6 +; GFX11-NEXT: v_cvt_f32_f16_e32 v7, s8 +; GFX11-NEXT: v_cvt_f32_f16_e32 v5, s9 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, s5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s3 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b128 v8, v[4:7], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v8, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <8 x half> %arg to <8 x float> store <8 x float> %ext, ptr addrspace(1) %out ret void @@ -367,6 +518,20 @@ define amdgpu_kernel void 
@extload_f16_to_f64_arg(ptr addrspace(1) %out, half %a ; VI-NEXT: v_mov_b32_e32 v2, s0 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_f16_to_f64_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext half %arg to double store double %ext, ptr addrspace(1) %out ret void @@ -404,6 +569,23 @@ define amdgpu_kernel void @extload_v2f16_to_v2f64_arg(ptr addrspace(1) %out, <2 ; VI-NEXT: v_mov_b32_e32 v4, s0 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v2f16_to_v2f64_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s3, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v0 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v1 +; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <2 x half> %arg to <2 x double> store <2 x double> %ext, ptr addrspace(1) %out ret void @@ -451,6 +633,26 @@ define amdgpu_kernel void @extload_v3f16_to_v3f64_arg(ptr addrspace(1) %out, <3 ; VI-NEXT: v_mov_b32_e32 v4, s0 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v3f16_to_v3f64_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s4, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, s4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v6, s2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v0 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v6 +; GFX11-NEXT: v_mov_b32_e32 v6, 0 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b64 v6, v[4:5], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v6, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <3 x half> %arg to <3 x double> store <3 x double> %ext, ptr addrspace(1) %out ret void @@ -506,6 +708,29 @@ define amdgpu_kernel void @extload_v4f16_to_v4f64_arg(ptr addrspace(1) %out, <4 ; VI-NEXT: v_mov_b32_e32 v1, s1 ; VI-NEXT: flat_store_dwordx4 v[0:1], v[4:7] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v4f16_to_v4f64_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s5, s3, 16 +; GFX11-NEXT: s_lshr_b32 s4, s2, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, s3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, s5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v8, s4 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v2 +; 
GFX11-NEXT: v_cvt_f64_f32_e32 v[6:7], v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v8 +; GFX11-NEXT: v_mov_b32_e32 v8, 0 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b128 v8, v[4:7], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v8, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <4 x half> %arg to <4 x double> store <4 x double> %ext, ptr addrspace(1) %out ret void @@ -605,97 +830,188 @@ define amdgpu_kernel void @extload_v8f16_to_v8f64_arg(ptr addrspace(1) %out, <8 ; VI-NEXT: v_mov_b32_e32 v5, s5 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: extload_v8f16_to_v8f64_arg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x10 +; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_lshr_b32 s9, s7, 16 +; GFX11-NEXT: s_lshr_b32 s8, s6, 16 +; GFX11-NEXT: s_lshr_b32 s3, s5, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v6, s7 +; GFX11-NEXT: v_cvt_f32_f16_e32 v11, s9 +; GFX11-NEXT: s_lshr_b32 s2, s4, 16 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, s6 +; GFX11-NEXT: v_cvt_f32_f16_e32 v10, s8 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, s5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v7, s3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, s4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v16, s2 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[12:13], v6 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[14:15], v11 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[8:9], v3 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[10:11], v10 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v2 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[6:7], v7 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v16 +; GFX11-NEXT: v_mov_b32_e32 v16, 0 +; GFX11-NEXT: s_clause 0x3 +; GFX11-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:48 +; GFX11-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:32 +; GFX11-NEXT: global_store_b128 v16, v[4:7], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v16, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %ext = fpext <8 x half> %arg to <8 x double> store <8 x double> %ext, ptr addrspace(1) %out ret void } define amdgpu_kernel void @global_load_store_f16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { -; GCN-LABEL: global_load_store_f16: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NEXT: flat_load_ushort v2, v[0:1] -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: flat_store_short v[0:1], v2 -; GCN-NEXT: s_endpgm +; CIVI-LABEL: global_load_store_f16: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s2 +; CIVI-NEXT: v_mov_b32_e32 v1, s3 +; CIVI-NEXT: flat_load_ushort v2, v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: flat_store_short v[0:1], v2 +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: global_load_store_f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_u16 v1, v0, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: global_store_b16 v0, v1, s[0:1] +; 
GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load half, ptr addrspace(1) %in store half %val, ptr addrspace(1) %out ret void } define amdgpu_kernel void @global_load_store_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { -; GCN-LABEL: global_load_store_v2f16: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NEXT: flat_load_dword v2, v[0:1] -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: flat_store_dword v[0:1], v2 -; GCN-NEXT: s_endpgm +; CIVI-LABEL: global_load_store_v2f16: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s2 +; CIVI-NEXT: v_mov_b32_e32 v1, s3 +; CIVI-NEXT: flat_load_dword v2, v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: flat_store_dword v[0:1], v2 +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: global_load_store_v2f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b32 v1, v0, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <2 x half>, ptr addrspace(1) %in store <2 x half> %val, ptr addrspace(1) %out ret void } define amdgpu_kernel void @global_load_store_v4f16(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 { -; GCN-LABEL: global_load_store_v4f16: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GCN-NEXT: v_mov_b32_e32 v2, s2 -; GCN-NEXT: v_mov_b32_e32 v3, s3 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1] -; GCN-NEXT: s_endpgm +; CIVI-LABEL: global_load_store_v4f16: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v2, s2 +; CIVI-NEXT: v_mov_b32_e32 v3, s3 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: global_load_store_v4f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b64 v[0:1], v2, s[0:1] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[2:3] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <4 x half>, ptr addrspace(1) %in store <4 x half> %val, ptr addrspace(1) %out ret void } define amdgpu_kernel void @global_load_store_v8f16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { -; GCN-LABEL: global_load_store_v8f16: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GCN-NEXT: v_mov_b32_e32 v4, s0 -; GCN-NEXT: v_mov_b32_e32 v5, s1 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-NEXT: s_endpgm +; 
CIVI-LABEL: global_load_store_v8f16: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s2 +; CIVI-NEXT: v_mov_b32_e32 v1, s3 +; CIVI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v4, s0 +; CIVI-NEXT: v_mov_b32_e32 v5, s1 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: global_load_store_v8f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b128 v[0:3], v4, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <8 x half>, ptr addrspace(1) %in store <8 x half> %val, ptr addrspace(1) %out ret void } define amdgpu_kernel void @global_extload_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { -; GCN-LABEL: global_extload_f16_to_f32: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NEXT: flat_load_ushort v0, v[0:1] -; GCN-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_cvt_f32_f16_e32 v2, v0 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: flat_store_dword v[0:1], v2 -; GCN-NEXT: s_endpgm +; CIVI-LABEL: global_extload_f16_to_f32: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s2 +; CIVI-NEXT: v_mov_b32_e32 v1, s3 +; CIVI-NEXT: flat_load_ushort v0, v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v1, s1 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: v_cvt_f32_f16_e32 v2, v0 +; CIVI-NEXT: v_mov_b32_e32 v0, s0 +; CIVI-NEXT: flat_store_dword v[0:1], v2 +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_f16_to_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_u16 v1, v0, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v1 +; GFX11-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load half, ptr addrspace(1) %in %cvt = fpext half %val to float store float %cvt, ptr addrspace(1) %out @@ -733,6 +1049,21 @@ define amdgpu_kernel void @global_extload_v2f16_to_v2f32(ptr addrspace(1) %out, ; VI-NEXT: v_cvt_f32_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v2f16_to_v2f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b32 v0, v2, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v1 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <2 x half>, ptr addrspace(1) %in %cvt = fpext <2 x half> %val to <2 x float> store <2 x float> %cvt, ptr addrspace(1) %out @@ -772,6 +1103,22 @@ define amdgpu_kernel void @global_extload_v3f16_to_v3f32(ptr addrspace(1) %out, ; VI-NEXT: 
v_cvt_f32_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; VI-NEXT: flat_store_dwordx3 v[3:4], v[0:2] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v3f16_to_v3f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b64 v[0:1], v3, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v4 +; GFX11-NEXT: global_store_b96 v3, v[0:2], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <3 x half>, ptr addrspace(1) %in %cvt = fpext <3 x half> %val to <3 x float> store <3 x float> %cvt, ptr addrspace(1) %out @@ -814,6 +1161,24 @@ define amdgpu_kernel void @global_extload_v4f16_to_v4f32(ptr addrspace(1) %out, ; VI-NEXT: v_mov_b32_e32 v5, s1 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v4f16_to_v4f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b64 v[0:1], v4, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v5 +; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <4 x half>, ptr addrspace(1) %in %cvt = fpext <4 x half> %val to <4 x float> store <4 x float> %cvt, ptr addrspace(1) %out @@ -876,6 +1241,31 @@ define amdgpu_kernel void @global_extload_v8f16_to_v8f32(ptr addrspace(1) %out, ; VI-NEXT: flat_store_dwordx4 v[0:1], v[8:11] ; VI-NEXT: flat_store_dwordx4 v[12:13], v[4:7] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v8f16_to_v8f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v12, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b128 v[0:3], v12, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v3 +; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v6, v1 +; GFX11-NEXT: v_cvt_f32_f16_e32 v4, v0 +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v10, v3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v8, v2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v11, v5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v9, v9 +; GFX11-NEXT: v_cvt_f32_f16_e32 v7, v1 +; GFX11-NEXT: v_cvt_f32_f16_e32 v5, v0 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b128 v12, v[8:11], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v12, v[4:7], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <8 x half>, ptr addrspace(1) %in %cvt = fpext <8 x half> %val to <8 x float> store <8 x float> %cvt, ptr addrspace(1) %out @@ -990,6 +1380,48 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f32(ptr addrspace(1) %out ; VI-NEXT: flat_store_dwordx4 v[24:25], v[16:19] ; VI-NEXT: flat_store_dwordx4 v[26:27], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: 
global_extload_v16f16_to_v16f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v20, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_load_b128 v[0:3], v20, s[2:3] +; GFX11-NEXT: global_load_b128 v[4:7], v20, s[2:3] offset:16 +; GFX11-NEXT: s_waitcnt vmcnt(1) +; GFX11-NEXT: v_cvt_f32_f16_e32 v10, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cvt_f32_f16_e32 v18, v7 +; GFX11-NEXT: v_cvt_f32_f16_e32 v16, v6 +; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX11-NEXT: v_cvt_f32_f16_e32 v8, v0 +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v14, v3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v12, v2 +; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v4 +; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v19, v7 +; GFX11-NEXT: v_cvt_f32_f16_e32 v17, v6 +; GFX11-NEXT: v_cvt_f32_f16_e32 v11, v1 +; GFX11-NEXT: v_cvt_f32_f16_e32 v15, v3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, v5 +; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v13, v13 +; GFX11-NEXT: v_cvt_f32_f16_e32 v9, v9 +; GFX11-NEXT: s_clause 0x3 +; GFX11-NEXT: global_store_b128 v20, v[16:19], s[0:1] offset:48 +; GFX11-NEXT: global_store_b128 v20, v[0:3], s[0:1] offset:32 +; GFX11-NEXT: global_store_b128 v20, v[12:15], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v20, v[8:11], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <16 x half>, ptr addrspace(1) %in %cvt = fpext <16 x half> %val to <16 x float> store <16 x float> %cvt, ptr addrspace(1) %out @@ -997,20 +1429,34 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f32(ptr addrspace(1) %out } define amdgpu_kernel void @global_extload_f16_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { -; GCN-LABEL: global_extload_f16_to_f64: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NEXT: flat_load_ushort v0, v[0:1] -; GCN-NEXT: v_mov_b32_e32 v2, s0 -; GCN-NEXT: v_mov_b32_e32 v3, s1 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; GCN-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 -; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1] -; GCN-NEXT: s_endpgm +; CIVI-LABEL: global_extload_f16_to_f64: +; CIVI: ; %bb.0: +; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CIVI-NEXT: s_waitcnt lgkmcnt(0) +; CIVI-NEXT: v_mov_b32_e32 v0, s2 +; CIVI-NEXT: v_mov_b32_e32 v1, s3 +; CIVI-NEXT: flat_load_ushort v0, v[0:1] +; CIVI-NEXT: v_mov_b32_e32 v2, s0 +; CIVI-NEXT: v_mov_b32_e32 v3, s1 +; CIVI-NEXT: s_waitcnt vmcnt(0) +; CIVI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; CIVI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; CIVI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; CIVI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_f16_to_f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_u16 v0, v2, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] +; GFX11-NEXT: 
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load half, ptr addrspace(1) %in %cvt = fpext half %val to double store double %cvt, ptr addrspace(1) %out @@ -1052,6 +1498,24 @@ define amdgpu_kernel void @global_extload_v2f16_to_v2f64(ptr addrspace(1) %out, ; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v2f16_to_v2f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b32 v0, v4, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v1 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v2 +; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <2 x half>, ptr addrspace(1) %in %cvt = fpext <2 x half> %val to <2 x double> store <2 x double> %cvt, ptr addrspace(1) %out @@ -1107,6 +1571,28 @@ define amdgpu_kernel void @global_extload_v3f16_to_v3f64(ptr addrspace(1) %out, ; VI-NEXT: flat_store_dwordx2 v[8:9], v[6:7] ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v3f16_to_v3f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v6, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b64 v[0:1], v6, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, v1 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v2 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v2 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b64 v6, v[4:5], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v6, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <3 x half>, ptr addrspace(1) %in %cvt = fpext <3 x half> %val to <3 x double> store <3 x double> %cvt, ptr addrspace(1) %out @@ -1167,6 +1653,32 @@ define amdgpu_kernel void @global_extload_v4f16_to_v4f64(ptr addrspace(1) %out, ; VI-NEXT: flat_store_dwordx4 v[10:11], v[4:7] ; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v4f16_to_v4f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v8, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b64 v[0:1], v8, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v4, v1 +; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v4 +; GFX11-NEXT: v_cvt_f64_f32_e32 
v[0:1], v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-NEXT: v_cvt_f64_f32_e32 v[6:7], v2 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v3 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_store_b128 v8, v[4:7], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v8, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <4 x half>, ptr addrspace(1) %in %cvt = fpext <4 x half> %val to <4 x double> store <4 x double> %cvt, ptr addrspace(1) %out @@ -1265,6 +1777,41 @@ define amdgpu_kernel void @global_extload_v8f16_to_v8f64(ptr addrspace(1) %out, ; VI-NEXT: flat_store_dwordx4 v[22:23], v[4:7] ; VI-NEXT: flat_store_dwordx4 v[16:17], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v8f16_to_v8f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v16, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: global_load_b128 v[0:3], v16, s[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cvt_f32_f16_e32 v4, v0 +; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-NEXT: v_cvt_f32_f16_e32 v6, v1 +; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v8, v2 +; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX11-NEXT: v_cvt_f32_f16_e32 v3, v3 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v17, v5 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v6 +; GFX11-NEXT: v_cvt_f32_f16_e32 v6, v9 +; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v7, v7 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[12:13], v3 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[8:9], v8 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[14:15], v6 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[10:11], v2 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[6:7], v7 +; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v17 +; GFX11-NEXT: s_clause 0x3 +; GFX11-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:48 +; GFX11-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:32 +; GFX11-NEXT: global_store_b128 v16, v[4:7], s[0:1] offset:16 +; GFX11-NEXT: global_store_b128 v16, v[0:3], s[0:1] +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm %val = load <8 x half>, ptr addrspace(1) %in %cvt = fpext <8 x half> %val to <8 x double> store <8 x double> %cvt, ptr addrspace(1) %out @@ -1452,6 +1999,68 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f64(ptr addrspace(1) %out ; VI-NEXT: flat_store_dwordx4 v[20:21], v[8:11] ; VI-NEXT: flat_store_dwordx4 v[22:23], v[0:3] ; VI-NEXT: s_endpgm +; +; GFX11-LABEL: global_extload_v16f16_to_v16f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX11-NEXT: v_mov_b32_e32 v32, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: global_load_b128 v[0:3], v32, s[2:3] +; GFX11-NEXT: global_load_b128 v[4:7], v32, s[2:3] offset:16 +; GFX11-NEXT: s_waitcnt vmcnt(1) +; GFX11-NEXT: v_cvt_f32_f16_e32 v10, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v5 +; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v4 +; GFX11-NEXT: v_cvt_f32_f16_e32 v15, v7 +; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX11-NEXT: v_cvt_f32_f16_e32 v14, v6 +; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX11-NEXT: v_cvt_f32_f16_e32 v13, v3 +; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-NEXT: v_cvt_f32_f16_e32 v12, v2 +; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX11-NEXT: v_cvt_f32_f16_e32 v18, v4 +; 
GFX11-NEXT: v_cvt_f32_f16_e32 v22, v5
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v10
+; GFX11-NEXT: v_cvt_f32_f16_e32 v10, v23
+; GFX11-NEXT: v_cvt_f32_f16_e32 v34, v11
+; GFX11-NEXT: v_cvt_f32_f16_e32 v11, v19
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-NEXT: v_cvt_f32_f16_e32 v7, v7
+; GFX11-NEXT: v_cvt_f32_f16_e32 v6, v6
+; GFX11-NEXT: v_cvt_f32_f16_e32 v8, v0
+; GFX11-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX11-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[28:29], v22
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[30:31], v10
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[24:25], v18
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[26:27], v11
+; GFX11-NEXT: v_cvt_f32_f16_e32 v33, v9
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[20:21], v15
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[22:23], v7
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[16:17], v14
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[18:19], v6
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v8
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[8:9], v12
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[12:13], v13
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[14:15], v3
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[10:11], v2
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[6:7], v34
+; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v33
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:80
+; GFX11-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:64
+; GFX11-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:112
+; GFX11-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:96
+; GFX11-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:48
+; GFX11-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:32
+; GFX11-NEXT: global_store_b128 v32, v[4:7], s[0:1] offset:16
+; GFX11-NEXT: global_store_b128 v32, v[0:3], s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load <16 x half>, ptr addrspace(1) %in
 %cvt = fpext <16 x half> %val to <16 x double>
 store <16 x double> %cvt, ptr addrspace(1) %out
@@ -1459,19 +2068,31 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f64(ptr addrspace(1) %out
 }
 
 define amdgpu_kernel void @global_truncstore_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-; GCN-LABEL: global_truncstore_f32_to_f16:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: v_mov_b32_e32 v1, s3
-; GCN-NEXT: flat_load_dword v0, v[0:1]
-; GCN-NEXT: v_mov_b32_e32 v1, s1
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_cvt_f16_f32_e32 v2, v0
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: flat_store_short v[0:1], v2
-; GCN-NEXT: s_endpgm
+; CIVI-LABEL: global_truncstore_f32_to_f16:
+; CIVI: ; %bb.0:
+; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CIVI-NEXT: s_waitcnt lgkmcnt(0)
+; CIVI-NEXT: v_mov_b32_e32 v0, s2
+; CIVI-NEXT: v_mov_b32_e32 v1, s3
+; CIVI-NEXT: flat_load_dword v0, v[0:1]
+; CIVI-NEXT: v_mov_b32_e32 v1, s1
+; CIVI-NEXT: s_waitcnt vmcnt(0)
+; CIVI-NEXT: v_cvt_f16_f32_e32 v2, v0
+; CIVI-NEXT: v_mov_b32_e32 v0, s0
+; CIVI-NEXT: flat_store_short v[0:1], v2
+; CIVI-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_truncstore_f32_to_f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load float, ptr addrspace(1) %in
 %cvt = fptrunc float %val to half
 store half %cvt, ptr addrspace(1) %out
@@ -1511,6 +2132,21 @@ define amdgpu_kernel void @global_truncstore_v2f32_to_v2f16(ptr addrspace(1) %ou
 ; VI-NEXT: v_or_b32_e32 v2, v3, v2
 ; VI-NEXT: flat_store_dword v[0:1], v2
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_truncstore_v2f32_to_v2f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b64 v[0:1], v2, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX11-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load <2 x float>, ptr addrspace(1) %in
 %cvt = fptrunc <2 x float> %val to <2 x half>
 store <2 x half> %cvt, ptr addrspace(1) %out
@@ -1562,6 +2198,24 @@ define amdgpu_kernel void @global_truncstore_v3f32_to_v3f16(ptr addrspace(1) %ou
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: flat_store_dword v[0:1], v3
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_truncstore_v3f32_to_v3f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v3, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b96 v[0:2], v3, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX11-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: global_store_b16 v3, v2, s[0:1] offset:4
+; GFX11-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load <3 x float>, ptr addrspace(1) %in
 %cvt = fptrunc <3 x float> %val to <3 x half>
 store <3 x half> %cvt, ptr addrspace(1) %out
@@ -1608,6 +2262,24 @@ define amdgpu_kernel void @global_truncstore_v4f32_to_v4f16(ptr addrspace(1) %ou
 ; VI-NEXT: v_or_b32_e32 v2, v5, v4
 ; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_truncstore_v4f32_to_v4f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b128 v[0:3], v4, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX11-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX11-NEXT: v_cvt_f16_f32_e32 v5, v1
+; GFX11-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v1, v2, v3
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load <4 x float>, ptr addrspace(1) %in
 %cvt = fptrunc <4 x float> %val to <4 x half>
 store <4 x half> %cvt, ptr addrspace(1) %out
@@ -1680,6 +2352,33 @@ define amdgpu_kernel void @global_truncstore_v8f32_to_v8f16(ptr addrspace(1) %ou
 ; VI-NEXT: v_or_b32_e32 v2, v4, v5
 ; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_truncstore_v8f32_to_v8f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v8, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: global_load_b128 v[0:3], v8, s[2:3] offset:16
+; GFX11-NEXT: global_load_b128 v[4:7], v8, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(1)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX11-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX11-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v7, v7
+; GFX11-NEXT: v_cvt_f16_f32_e32 v6, v6
+; GFX11-NEXT: v_cvt_f16_f32_e32 v5, v5
+; GFX11-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX11-NEXT: v_pack_b32_f16 v3, v2, v3
+; GFX11-NEXT: v_pack_b32_f16 v2, v0, v1
+; GFX11-NEXT: v_pack_b32_f16 v1, v6, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_pack_b32_f16 v0, v4, v5
+; GFX11-NEXT: global_store_b128 v8, v[0:3], s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load <8 x float>, ptr addrspace(1) %in
 %cvt = fptrunc <8 x float> %val to <8 x half>
 store <8 x half> %cvt, ptr addrspace(1) %out
@@ -1815,6 +2514,50 @@ define amdgpu_kernel void @global_truncstore_v16f32_to_v16f16(ptr addrspace(1) %
 ; VI-NEXT: v_or_b32_e32 v2, v12, v13
 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_truncstore_v16f32_to_v16f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v16, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: global_load_b128 v[0:3], v16, s[2:3] offset:16
+; GFX11-NEXT: global_load_b128 v[4:7], v16, s[2:3]
+; GFX11-NEXT: global_load_b128 v[8:11], v16, s[2:3] offset:48
+; GFX11-NEXT: global_load_b128 v[12:15], v16, s[2:3] offset:32
+; GFX11-NEXT: s_waitcnt vmcnt(3)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX11-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX11-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-NEXT: s_waitcnt vmcnt(2)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v7, v7
+; GFX11-NEXT: v_cvt_f16_f32_e32 v6, v6
+; GFX11-NEXT: v_cvt_f16_f32_e32 v17, v5
+; GFX11-NEXT: v_cvt_f16_f32_e32 v18, v4
+; GFX11-NEXT: s_waitcnt vmcnt(1)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v4, v11
+; GFX11-NEXT: v_cvt_f16_f32_e32 v5, v10
+; GFX11-NEXT: v_cvt_f16_f32_e32 v9, v9
+; GFX11-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cvt_f16_f32_e32 v10, v15
+; GFX11-NEXT: v_cvt_f16_f32_e32 v11, v14
+; GFX11-NEXT: v_cvt_f16_f32_e32 v13, v13
+; GFX11-NEXT: v_cvt_f16_f32_e32 v12, v12
+; GFX11-NEXT: v_pack_b32_f16 v3, v2, v3
+; GFX11-NEXT: v_pack_b32_f16 v2, v0, v1
+; GFX11-NEXT: v_pack_b32_f16 v1, v6, v7
+; GFX11-NEXT: v_pack_b32_f16 v7, v5, v4
+; GFX11-NEXT: v_pack_b32_f16 v6, v8, v9
+; GFX11-NEXT: v_pack_b32_f16 v5, v11, v10
+; GFX11-NEXT: v_pack_b32_f16 v4, v12, v13
+; GFX11-NEXT: v_pack_b32_f16 v0, v18, v17
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: global_store_b128 v16, v[4:7], s[0:1] offset:16
+; GFX11-NEXT: global_store_b128 v16, v[0:3], s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load <16 x float>, ptr addrspace(1) %in
 %cvt = fptrunc <16 x float> %val to <16 x half>
 store <16 x half> %cvt, ptr addrspace(1) %out
@@ -1851,6 +2594,20 @@ define amdgpu_kernel void @fadd_f16(ptr addrspace(1) %out, half %a, half %b) #0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: flat_store_short v[0:1], v2
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: fadd_f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x8
+; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_lshr_b32 s3, s2, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_add_f16_e64 v1, s2, s3
+; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %add = fadd half %a, %b
 store half %add, ptr addrspace(1) %out, align 4
 ret void
@@ -1894,6 +2651,16 @@ define amdgpu_kernel void @fadd_v2f16(ptr addrspace(1) %out, <2 x half> %a, <2 x
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: flat_store_dword v[0:1], v2
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: fadd_v2f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_f16 v1, s2, s3
+; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %add = fadd <2 x half> %a, %b
 store <2 x half> %add, ptr addrspace(1) %out, align 8
 ret void
@@ -1955,6 +2722,19 @@ define amdgpu_kernel void @fadd_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; VI-NEXT: v_or_b32_e32 v0, v0, v3
 ; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: fadd_v4f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_b128 v[0:3], v4, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_pk_add_f16 v1, v1, v3
+; GFX11-NEXT: v_pk_add_f16 v0, v0, v2
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %b_ptr = getelementptr <4 x half>, ptr addrspace(1) %in, i32 1
 %a = load <4 x half>, ptr addrspace(1) %in, align 16
 %b = load <4 x half>, ptr addrspace(1) %b_ptr, align 16
@@ -2063,24 +2843,50 @@ define amdgpu_kernel void @fadd_v8f16(ptr addrspace(1) %out, <8 x half> %a, <8 x
 ; VI-NEXT: v_mov_b32_e32 v4, s0
 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT: s_endpgm
+;
+; GFX11-LABEL: fadd_v8f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b256 s[4:11], s[0:1], 0x10
+; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_f16 v3, s7, s11
+; GFX11-NEXT: v_pk_add_f16 v2, s6, s10
+; GFX11-NEXT: v_pk_add_f16 v1, s5, s9
+; GFX11-NEXT: v_pk_add_f16 v0, s4, s8
+; GFX11-NEXT: global_store_b128 v4, v[0:3], s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %add = fadd <8 x half> %a, %b
 store <8 x half> %add, ptr addrspace(1) %out, align 32
 ret void
 }
 
 define amdgpu_kernel void @test_bitcast_from_half(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test_bitcast_from_half:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: v_mov_b32_e32 v1, s1
-; GCN-NEXT: flat_load_ushort v2, v[0:1]
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: v_mov_b32_e32 v1, s3
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_short v[0:1], v2
-; GCN-NEXT: s_endpgm
+; CIVI-LABEL: test_bitcast_from_half:
+; CIVI: ; %bb.0:
+; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CIVI-NEXT: s_waitcnt lgkmcnt(0)
+; CIVI-NEXT: v_mov_b32_e32 v0, s0
+; CIVI-NEXT: v_mov_b32_e32 v1, s1
+; CIVI-NEXT: flat_load_ushort v2, v[0:1]
+; CIVI-NEXT: v_mov_b32_e32 v0, s2
+; CIVI-NEXT: v_mov_b32_e32 v1, s3
+; CIVI-NEXT: s_waitcnt vmcnt(0)
+; CIVI-NEXT: flat_store_short v[0:1], v2
+; CIVI-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_bitcast_from_half:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_u16 v1, v0, s[0:1]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b16 v0, v1, s[2:3]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load half, ptr addrspace(1) %in
 %val_int = bitcast half %val to i16
 store i16 %val_int, ptr addrspace(1) %out
@@ -2088,18 +2894,29 @@ define amdgpu_kernel void @test_bitcast_from_half(ptr addrspace(1) %in, ptr addr
 }
 
 define amdgpu_kernel void @test_bitcast_to_half(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-; GCN-LABEL: test_bitcast_to_half:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: v_mov_b32_e32 v1, s3
-; GCN-NEXT: flat_load_ushort v2, v[0:1]
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: v_mov_b32_e32 v1, s1
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_short v[0:1], v2
-; GCN-NEXT: s_endpgm
+; CIVI-LABEL: test_bitcast_to_half:
+; CIVI: ; %bb.0:
+; CIVI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CIVI-NEXT: s_waitcnt lgkmcnt(0)
+; CIVI-NEXT: v_mov_b32_e32 v0, s2
+; CIVI-NEXT: v_mov_b32_e32 v1, s3
+; CIVI-NEXT: flat_load_ushort v2, v[0:1]
+; CIVI-NEXT: v_mov_b32_e32 v0, s0
+; CIVI-NEXT: v_mov_b32_e32 v1, s1
+; CIVI-NEXT: s_waitcnt vmcnt(0)
+; CIVI-NEXT: flat_store_short v[0:1], v2
+; CIVI-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_bitcast_to_half:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: global_load_u16 v1, v0, s[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
 %val = load i16, ptr addrspace(1) %in
 %val_fp = bitcast i16 %val to half
 store half %val_fp, ptr addrspace(1) %out
-- 
2.7.4