; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0)
ret <8 x i16> %t
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nuw i32 %q, 24
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 6
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: i32.const -12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 -6
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i16, i16* %p, i32 6
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i16*
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i16
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
ret <8 x i16> %t
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0)
ret void
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nuw i32 %q, 24
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: i32.const -12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 -6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i16, i16* %p, i32 6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i16
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
ret void
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0)
ret <4 x i32> %t
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 6
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: i32.const -24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 -6
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i32, i32* %p, i32 6
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i32*
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i32
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
ret <4 x i32> %t
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0)
ret void
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: i32.const -24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 -6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i32, i32* %p, i32 6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i32
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
ret void
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0)
ret <2 x i64> %t
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 6
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: i32.const -48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 -6
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i64, i64* %p, i32 6
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i64*
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i64
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
ret <2 x i64> %t
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0)
ret void
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: i32.const -48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 -6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i64, i64* %p, i32 6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i64
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
ret void
; CHECK: .functype load_zero_i32_no_offset (i32) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%v = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %p)
ret <4 x i32> %v
; CHECK: .functype load_zero_i32_with_folded_offset (i32) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_zero 24:p2align=0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
; CHECK: .functype load_zero_i32_with_folded_gep_offset (i32) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_zero 24:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 6
%t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const -24
; CHECK-NEXT: i32.add
; CHECK-NEXT: v128.load32_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 -6
%t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: v128.load32_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: v128.load32_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i32, i32* %p, i32 6
%t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
; CHECK: .functype load_zero_i32_from_numeric_address () -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 0
; CHECK-NEXT: v128.load32_zero 42:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i32*
%t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
; CHECK: .functype load_zero_i32_from_global_address () -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 0
; CHECK-NEXT: v128.load32_zero gv_i32:p2align=0
; CHECK-NEXT: # fallthrough-return
%t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* @gv_i32)
ret <4 x i32> %t
; CHECK: .functype load_zero_i64_no_offset (i32) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%v = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %p)
ret <2 x i64> %v
; CHECK: .functype load_zero_i64_with_folded_offset (i32) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_zero 24:p2align=0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
; CHECK: .functype load_zero_i64_with_folded_gep_offset (i32) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_zero 48:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i64 6
%t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const -48
; CHECK-NEXT: i32.add
; CHECK-NEXT: v128.load64_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i64 -6
%t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: v128.load64_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: v128.load64_zero 0:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i64, i64* %p, i64 6
%t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
; CHECK: .functype load_zero_i64_from_numeric_address () -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 0
; CHECK-NEXT: v128.load64_zero 42:p2align=0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i64*
%t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
; CHECK: .functype load_zero_i64_from_global_address () -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 0
; CHECK-NEXT: v128.load64_zero gv_i64:p2align=0
; CHECK-NEXT: # fallthrough-return
%t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* @gv_i64)
ret <2 x i64> %t