; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB35_4
; RV32I-NEXT: .LBB35_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB35_4
; RV64I-NEXT: .LBB35_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB36_4
; RV32I-NEXT: .LBB36_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB36_4
; RV64I-NEXT: .LBB36_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB37_4
; RV32I-NEXT: .LBB37_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB37_4
; RV64I-NEXT: .LBB37_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB38_4
; RV32I-NEXT: .LBB38_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB38_4
; RV64I-NEXT: .LBB38_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB39_4
; RV32I-NEXT: .LBB39_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB39_4
; RV64I-NEXT: .LBB39_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB40_4
; RV32I-NEXT: .LBB40_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB40_4
; RV64I-NEXT: .LBB40_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB41_4
; RV32I-NEXT: .LBB41_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB41_4
; RV64I-NEXT: .LBB41_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB42_4
; RV32I-NEXT: .LBB42_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB42_4
; RV64I-NEXT: .LBB42_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB43_4
; RV32I-NEXT: .LBB43_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB43_4
; RV64I-NEXT: .LBB43_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB44_4
; RV32I-NEXT: .LBB44_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB44_4
; RV64I-NEXT: .LBB44_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB45_4
; RV32I-NEXT: .LBB45_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB45_4
; RV64I-NEXT: .LBB45_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB46_4
; RV32I-NEXT: .LBB46_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB46_4
; RV64I-NEXT: .LBB46_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB47_4
; RV32I-NEXT: .LBB47_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB47_4
; RV64I-NEXT: .LBB47_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB48_4
; RV32I-NEXT: .LBB48_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB48_4
; RV64I-NEXT: .LBB48_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB49_4
; RV32I-NEXT: .LBB49_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB49_4
; RV64I-NEXT: .LBB49_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB50_4
; RV32I-NEXT: .LBB50_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB50_4
; RV64I-NEXT: .LBB50_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB51_4
; RV32I-NEXT: .LBB51_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB51_4
; RV64I-NEXT: .LBB51_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB52_4
; RV32I-NEXT: .LBB52_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB52_4
; RV64I-NEXT: .LBB52_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB53_4
; RV32I-NEXT: .LBB53_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB53_4
; RV64I-NEXT: .LBB53_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB54_4
; RV32I-NEXT: .LBB54_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB54_4
; RV64I-NEXT: .LBB54_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
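
For context on the hunks above: RISC-V `lb` sign-extends the loaded byte into the destination register, while `lbu` zero-extends it. In each cmpxchg loop, the byte that the `__atomic_compare_exchange_1` libcall writes back to the stack slot at 15(sp) is reloaded every iteration, and the checks now expect the zero-extending form. A minimal C sketch of the lb/lbu difference follows; the helper names are invented for illustration and are not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helpers modeling the two RISC-V byte loads:
     * lb sign-extends the byte, lbu zero-extends it. */
    static int64_t load_lb(const uint8_t *p)  { return (int8_t)*p; } /* lb  */
    static int64_t load_lbu(const uint8_t *p) { return *p; }         /* lbu */

    int main(void) {
        uint8_t byte = 0x80; /* high bit set */
        printf("lb  -> %lld\n", (long long)load_lb(&byte));  /* prints -128 */
        printf("lbu -> %lld\n", (long long)load_lbu(&byte)); /* prints  128 */
        return 0;
    }

For any byte with the high bit set, the two loads leave different upper bits in the register, which is the entire difference the updated checks encode.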
; RV64ZVE32F-NEXT: .LBB1_2: # %else2
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB1_3: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB1_2
; RV64ZVE32F-NEXT: .LBB1_4: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB2_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB2_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB2_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB3_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB3_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB3_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB4_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB4_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB4_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB5_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB5_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB5_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB6_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB6_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB6_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: andi a3, a2, 1
; RV64ZVE32F-NEXT: beqz a3, .LBB7_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: .LBB7_2: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB7_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: lb a0, 0(a1)
+; RV64ZVE32F-NEXT: lbu a0, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB8_5: # %cond.load
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB8_2
; RV64ZVE32F-NEXT: .LBB8_6: # %cond.load1
; RV64ZVE32F-NEXT: ld a2, 8(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB8_3
; RV64ZVE32F-NEXT: .LBB8_7: # %cond.load4
; RV64ZVE32F-NEXT: ld a2, 16(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: beqz a1, .LBB8_4
; RV64ZVE32F-NEXT: .LBB8_8: # %cond.load7
; RV64ZVE32F-NEXT: ld a0, 24(a0)
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB9_5: # %cond.load
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB9_2
; RV64ZVE32F-NEXT: .LBB9_6: # %cond.load1
; RV64ZVE32F-NEXT: ld a2, 8(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB9_3
; RV64ZVE32F-NEXT: .LBB9_7: # %cond.load4
; RV64ZVE32F-NEXT: ld a2, 16(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
; RV64ZVE32F-NEXT: beqz a1, .LBB9_4
; RV64ZVE32F-NEXT: .LBB9_8: # %cond.load7
; RV64ZVE32F-NEXT: ld a0, 24(a0)
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB11_9: # %cond.load
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB11_2
; RV64ZVE32F-NEXT: .LBB11_10: # %cond.load1
; RV64ZVE32F-NEXT: ld a2, 8(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB11_3
; RV64ZVE32F-NEXT: .LBB11_11: # %cond.load4
; RV64ZVE32F-NEXT: ld a2, 16(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB11_4
; RV64ZVE32F-NEXT: .LBB11_12: # %cond.load7
; RV64ZVE32F-NEXT: ld a2, 24(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB11_5
; RV64ZVE32F-NEXT: .LBB11_13: # %cond.load10
; RV64ZVE32F-NEXT: ld a2, 32(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB11_6
; RV64ZVE32F-NEXT: .LBB11_14: # %cond.load13
; RV64ZVE32F-NEXT: ld a2, 40(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: beqz a2, .LBB11_7
; RV64ZVE32F-NEXT: .LBB11_15: # %cond.load16
; RV64ZVE32F-NEXT: ld a2, 48(a0)
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: beqz a1, .LBB11_8
; RV64ZVE32F-NEXT: .LBB11_16: # %cond.load19
; RV64ZVE32F-NEXT: ld a0, 56(a0)
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a0
; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: .LBB12_2: # %else
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1
; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
; RV64ZVE32F-NEXT: .LBB97_2: # %else
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1
; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 2
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 5
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 9
; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 10
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 13
; RV64ZVE32F-NEXT: # %bb.22: # %cond.load40
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 14
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 3
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; RV64ZVE32F-NEXT: .LBB97_28: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 6
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 7
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 11
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vmv.s.x v10, a2
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v12, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: # %bb.20: # %cond.load34
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: # %bb.29: # %cond.load52
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: # %bb.40: # %cond.load76
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: # %bb.46: # %cond.load88
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lb a0, 0(a0)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: li a1, 32
; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: .LBB98_52: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: .LBB98_55: # %cond.load40
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: .LBB98_60: # %cond.load64
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lb a2, 0(a2)
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
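
The RV64ZVE32F hunks above are the scalarized fallback for a masked gather of i8 elements: for each active lane the index is extracted with `vmv.x.s` (or the pointer loaded with `ld`), the address formed with `add`, the byte loaded — now checked as `lbu` — and the element inserted with `vmv.s.x` plus `vslideup.vi`. A rough C model of that per-lane loop, under the assumption of an offset-based gather and with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative scalarization of a masked gather of i8 elements,
     * mirroring the per-lane pattern in the checks above. */
    static void gather_i8(uint8_t *dst, const uint8_t *base,
                          const int64_t *offsets, const uint8_t *mask,
                          size_t n) {
        for (size_t i = 0; i < n; i++) {
            if (!mask[i])
                continue;                         /* inactive lane: dst[i] unchanged */
            const uint8_t *p = base + offsets[i]; /* add a2, a0, a2 */
            dst[i] = *p;                          /* byte load (lbu) + vmv.s.x insert */
        }
    }

Note that for e8 vector elements only the low 8 bits reach the lane, so the sketch only models the control flow and addressing of the expansion, not the extension behavior itself.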