+2019-07-03 Michael Meissner <meissner@linux.ibm.com>
+
+ * config/rs6000/altivec.md (altivec_mov<mode>, VM2 iterator):
+ Change the RTL attribute "length" from "4" to "*" to allow the
+ length attribute to be adjusted automatically for prefixed load,
+ store, and add immediate instructions.
+ * config/rs6000/rs6000.md (extendhi<mode>2, EXTHI iterator):
+ Likewise.
+ (extendsi<mode>2, EXTSI iterator): Likewise.
+ (movsi_internal1): Likewise.
+ (movsi_from_sf): Likewise.
+ (movdi_from_sf_zero_ext): Likewise.
+ (mov<mode>_internal): Likewise.
+ (movcc_internal1, QHI iterator): Likewise.
+ (mov<mode>_softfloat, FMOVE32 iterator): Likewise.
+ (movsf_from_si): Likewise.
+ (mov<mode>_hardfloat32, FMOVE64 iterator): Likewise.
+ (mov<mode>_softfloat64, FMOVE64 iterator): Likewise.
+ (mov<mode>, FMOVE128 iterator): Likewise.
+ (movdi_internal64): Likewise.
+ * config/rs6000/vsx.md (vsx_le_permute_<mode>, VSX_TI iterator):
+ Likewise.
+ (vsx_le_undo_permute_<mode>, VSX_TI iterator): Likewise.
+ (vsx_mov<mode>_64bit, VSX_M iterator): Likewise.
+ (vsx_mov<mode>_32bit, VSX_M iterator): Likewise.
+ (vsx_splat_v4sf): Likewise.
+
2019-07-03 Mark Wielaard <mark@klomp.org>

PR debug/90981
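
For context, the effect of the attribute change can be sketched in machine-description terms. The pattern below is a hypothetical, simplified example (it does not appear in the patch; the name, predicates, and constraints are placeholders); only the final set_attr line mirrors what the patch does. With an explicit length of "4" every alternative is pinned at 4 bytes, whereas "*" falls back to the attribute's default computation, which is what allows prefixed load, store, and add-immediate forms to be sized automatically.

;; Hypothetical pattern for illustration only; not part of the patch.
(define_insn "*example_movsi"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
        (match_operand:SI 1 "input_operand" "m,r"))]
  ""
  "@
   lwz %0,%1
   stw %1,%0"
  [(set_attr "type" "load,store")
   ;; Was "4,4": both alternatives fixed at 4 bytes.  "*,*" lets the
   ;; default length attribute apply, so an alternative emitted as a
   ;; prefixed instruction gets its larger length without editing the
   ;; pattern again.
   (set_attr "length" "*,*")])

Alternatives that expand to more than one instruction keep their explicit byte counts (8, 12, 16, 20, 32) in the hunks below.
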
* return output_vec_const_move (operands);
#"
[(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*")
- (set_attr "length" "4,4,4,20,20,20,4,8,32")])
+ (set_attr "length" "*,*,*,20,20,20,*,8,32")])
;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode
;; is for unions. However for plain data movement, slightly favor the vector
vextsh2d %0,%1"
[(set_attr "type" "load,exts,fpload,vecperm")
(set_attr "sign_extend" "yes")
- (set_attr "length" "4,4,8,4")
+ (set_attr "length" "*,*,8,*")
(set_attr "isa" "*,*,p9v,p9v")])
(define_split
#"
[(set_attr "type" "load,exts,fpload,fpload,mffgpr,vecexts,vecperm,mftgpr")
(set_attr "sign_extend" "yes")
- (set_attr "length" "4,4,4,4,4,4,8,8")
+ (set_attr "length" "*,*,*,*,*,*,8,8")
(set_attr "isa" "*,*,p6,p8v,p8v,p9v,p8v,p8v")])
(define_split
veclogical, veclogical, vecsimple, mffgpr, mftgpr,
*, *, *")
(set_attr "length"
- "4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4,
- 8, 4, 4, 4, 4,
- 4, 4, 8, 4, 4,
- 4, 4, 4")
+ "*, *, *, *, *,
+ *, *, *, *, *,
+ 8, *, *, *, *,
+ *, *, 8, *, *,
+ *, *, *")
(set_attr "isa"
"*, *, *, p8v, p8v,
*, p8v, p8v, *, *,
fpstore, fpstore, fpstore, mftgpr, fp,
mffgpr")
(set_attr "length"
- "4, 4, 4, 4, 4,
- 4, 4, 4, 8, 4,
- 4")
+ "*, *, *, *, *,
+ *, *, *, 8, *,
+ *")
(set_attr "isa"
"*, *, p8v, p8v, *,
*, p9v, p8v, p8v, p8v,
"*, load, fpload, fpload, two,
two, mffgpr")
(set_attr "length"
- "4, 4, 4, 4, 8,
- 8, 4")
+ "*, *, *, *, 8,
+ 8, *")
(set_attr "isa"
"*, *, p8v, p8v, p8v,
p9v, p8v")])
vecsimple, vecperm, vecperm, vecperm, vecperm, mftgpr,
mffgpr, mfjmpr, mtjmpr, *")
(set_attr "length"
- "4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 8, 4,
- 4, 4, 4, 4")
+ "*, *, *, *, *, *,
+ *, *, *, *, 8, *,
+ *, *, *, *")
(set_attr "isa"
"*, *, p9v, *, p9v, *,
p9v, p9v, p9v, p9v, p9v, p9v,
(const_string "mtjmpr")
(const_string "load")
(const_string "store")])
- (set_attr "length" "4,4,12,4,4,8,4,4,4,4,4,4")])
+ (set_attr "length" "*,*,12,*,*,8,*,*,*,*,*,*")])
\f
;; For floating-point, we normally deal with the floating-point registers
;; unless -msoft-float is used. The sole exception is that parameter passing
nop"
[(set_attr "type"
"*, mtjmpr, mfjmpr, load, store, *,
- *, *, *, *")
+ *, *, *, *")
(set_attr "length"
- "4, 4, 4, 4, 4, 4,
- 4, 4, 8, 4")])
+ "*, *, *, *, *, *,
+ *, *, 8, *")])
;; Like movsf, but adjust a SI value to be used in a SF context, i.e.
;; (set (reg:SF ...) (subreg:SF (reg:SI ...) 0))
DONE;
}
[(set_attr "length"
- "4, 4, 4, 4, 4, 4,
- 4, 12, 4, 4")
+ "*, *, *, *, *, *,
+ *, 12, *, *")
(set_attr "type"
"load, fpload, fpload, fpload, store, fpstore,
fpstore, vecfloat, mffgpr, *")
store, load, two")
(set_attr "size" "64")
(set_attr "length"
- "4, 4, 4, 4, 4,
- 4, 4, 4, 4, 8,
+ "*, *, *, *, *,
+ *, *, *, *, 8,
8, 8, 8")
(set_attr "isa"
"*, *, *, p9v, p9v,
*, *, *")
(set_attr "length"
- "4, 4, 4, 4, 4, 8,
- 12, 16, 4")])
+ "*, *, *, *, *, 8,
+ 12, 16, *")])
\f
(define_expand "mov<mode>"
[(set (match_operand:FMOVE128 0 "general_operand")
vecsimple")
(set_attr "size" "64")
(set_attr "length"
- "8, 8, 8, 4, 4, 4,
- 16, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 8,
- 4")
+ "8, 8, 8, *, *, *,
+ 16, *, *, *, *, *,
+ *, *, *, *, *, 8,
+ *")
(set_attr "isa"
"*, *, *, *, *, *,
*, p9v, p7v, p9v, p7v, *,
mftgpr, mffgpr")
(set_attr "size" "64")
(set_attr "length"
- "4, 4, 4, 4, 4, 20,
- 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4,
- 4, 8, 4, 4, 4, 4,
- 4, 4")
+ "*, *, *, *, *, 20,
+ *, *, *, *, *, *,
+ *, *, *, *, *, *,
+ *, 8, *, *, *, *,
+ *, *")
(set_attr "isa"
"*, *, *, *, *, *,
*, *, *, p9v, p7v, p9v,
mr %0,%L1\;mr %L0,%1
ld%U1%X1 %0,%L1\;ld%U1%X1 %L0,%1
std%U0%X0 %L1,%0\;std%U0%X0 %1,%L0"
- [(set_attr "length" "4,4,4,8,8,8")
+ [(set_attr "length" "*,*,*,8,8,8")
(set_attr "type" "vecperm,vecload,vecstore,*,load,store")])
(define_insn_and_split "*vsx_le_undo_permute_<mode>"
store, load, store, *, vecsimple, vecsimple,
vecsimple, *, *, vecstore, vecload")
(set_attr "length"
- "4, 4, 4, 8, 4, 8,
- 8, 8, 8, 8, 4, 4,
- 4, 20, 8, 4, 4")
+ "*, *, *, 8, *, 8,
+ 8, 8, 8, 8, *, *,
+ *, 20, 8, *, *")
(set_attr "isa"
"<VSisa>, <VSisa>, <VSisa>, *, *, *,
*, *, *, *, p9v, *,
vecsimple, vecsimple, vecsimple, *, *,
vecstore, vecload")
(set_attr "length"
- "4, 4, 4, 16, 16, 16,
- 4, 4, 4, 20, 16,
- 4, 4")
+ "*, *, *, 16, 16, 16,
+ *, *, *, 20, 16,
+ *, *")
(set_attr "isa"
"<VSisa>, <VSisa>, <VSisa>, *, *, *,
p9v, *, <VSisa>, *, *,
(const_int 0)] UNSPEC_VSX_XXSPLTW))]
""
[(set_attr "type" "vecload,vecperm,mftgpr")
- (set_attr "length" "4,8,4")
+ (set_attr "length" "*,8,*")
(set_attr "isa" "*,p8v,*")])
;; V4SF/V4SI splat from a vector element