"vld1.32 d0[0], [%[d]] \n\t"
// Only touch d1
"vmull.u8 q0, d0, d14 \n\t"
- "vshrn.u16 d0, q0, #8 \n\t"
+ "vqrshrn.u16 d0, q0, #8 \n\t"
"vadd.u8 d0, d12, d0 \n\t"
"vst1.32 d0[0], [%[d]] \n\t"
AP "dualloopint: \n\t"
"vldr.32 d0, [%[d]] \n\t"
"vmull.u8 q1, d0, d14 \n\t"
- "vshrn.u16 d0, q1, #8 \n\t"
+ "vqrshrn.u16 d0, q1, #8 \n\t"
"vqadd.u8 d0, d0, d12 \n\t"
"vstm %[d]!, {d0} \n\t"
"vmull.u8 q4, d2, d14 \n\t"
"vmull.u8 q5, d3, d15 \n\t"
- "vshrn.u16 d0, q2, #8 \n\t"
- "vshrn.u16 d1, q3, #8 \n\t"
- "vshrn.u16 d2, q4, #8 \n\t"
- "vshrn.u16 d3, q5, #8 \n\t"
+ "vqrshrn.u16 d0, q2, #8 \n\t"
+ "vqrshrn.u16 d1, q3, #8 \n\t"
+ "vqrshrn.u16 d2, q4, #8 \n\t"
+ "vqrshrn.u16 d3, q5, #8 \n\t"
"vqadd.u8 q0, q6, q0 \n\t"
"vqadd.u8 q1, q6, q1 \n\t"
AP "dualloop2int: \n\t"
"vldr.64 d0, [%[d]] \n\t"
"vmull.u8 q1, d0, d14 \n\t"
- "vshrn.u16 d0, q1, #8 \n\t"
+ "vqrshrn.u16 d0, q1, #8 \n\t"
"vqadd.u8 d0, d0, d12 \n\t"
"vstr.64 d0, [%[d]] \n\t"
AP "singleloop2: \n\t"
"vld1.32 d0[0], [%[d]] \n\t"
"vmull.u8 q1, d0, d14 \n\t"
- "vshrn.u16 d0, q1, #8 \n\t"
+ "vqrshrn.u16 d0, q1, #8 \n\t"
"vqadd.u8 d0, d0, d12 \n\t"
"vst1.32 d0[0], [%[d]] \n\t"
" vld1.32 d4[0], [%[d]] \n\t"
" vdup.u8 d0, d0[0] \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 d0, d0, d12 \n\t"
" vst1.32 d0[0], [%[d]]! \n\t"
" vmovl.u8 q0, d0 \n\t"
" vmul.u32 q0, q14 \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 q0, q0, q6 \n\t"
" vstm %[d]!, {d0} \n\t"
" vmull.u8 q5, d1, d31 \n\t"
// Shorten: narrow the u16 products back to 8 bits per channel
- " vshrn.u16 d12, q4, #8 \n\t"
- " vshrn.u16 d13, q5, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d13, q5, #8 \n\t"
// extract negated alpha
" vmvn.u16 q7, q6 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
" vmull.u8 q8, d17, d5 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
- " vshrn.u16 d1, q8, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d1, q8, #8 \n\t"
// Add
" vqadd.u8 q0, q0, q6 \n\t"
" vmovl.u8 q0, d0 \n\t"
" vmul.u32 q0, q14 \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 q0, q0, q6 \n\t"
" vstm %[d]!, {d0} \n\t"
" vld1.32 d4[0], [%[d]] \n\t"
" vdup.u8 d0, d0[0] \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 q0, q0, q6 \n\t"
" vst1.32 d0[0], [%[d]]! \n\t"
// Multiply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d9
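// Complement sc to get 255 - sc per channel, used below as the multiplier for d.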
"vmvn.u32 d6, d4 \n\t"
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
// Multiply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d9
"vmvn.u32 d6, d4 \n\t"
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
"vmull.u8 q5, d1,d14 \n\t"
// Get sc & sc alpha
- "vshrn.u16 d4, q4, #8 \n\t"
- "vshrn.u16 d5, q5, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d5, q5, #8 \n\t"
// sc is now in q2, 8bpp
// Shift out, then spread alpha for q2
"vmvn.u32 q3, q2 \n\t"
"vmull.u8 q4, d6,d2 \n\t"
"vmull.u8 q5, d7,d3 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
- "vshrn.u16 d1, q5, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d1, q5, #8 \n\t"
"vqadd.u8 q1, q0, q2 \n\t"
// Multiply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d9
// XXX: I can probably squash one of these 3
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
// Multiply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d6
"vmvn.u32 d6, d4 \n\t"
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
static void
_op_blend_pas_dp_mmx(DATA32 *s, DATA8 *m __UNUSED__, DATA32 c __UNUSED__, DATA32 *d, int l) {
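+ /* Hand the whole span to the plain _op_blend_p_dp_mmx variant; the MMX body below is now unreachable. */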
+ _op_blend_p_dp_mmx(s, m, c, d, l);
+ return;
DATA32 *e = d + l;
pxor_r2r(mm0, mm0);
MOV_A2R(ALPHA_256, mm6)
"vmul.u32 d8, d16, d8 \n\t"
"vmull.u8 q6, d4,d8 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 q2, q4,q0 \n\t"
"vmul.u32 d8, d16, d8 \n\t"
"vmull.u8 q6, d4,d8 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 d4, d8,d0 \n\t"
"vstr d4, [%[d]] \n\t"
"vmull.u8 q2, d5,d9 \n\t"
// Shift & narrow it
- "vshrn.u16 d8, q6, #8 \n\t"
- "vshrn.u16 d9, q2, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d9, q2, #8 \n\t"
// Add to s
"vqadd.u8 q2, q4,q0 \n\t"
"cmp %[tmp], %[d]\n\t"
// Shift & narrow it
- "vshrn.u16 d8, q6, #8 \n\t"
- "vshrn.u16 d9, q2, #8 \n\t"
- "vshrn.u16 d10, q7, #8 \n\t"
- "vshrn.u16 d11, q3, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d9, q2, #8 \n\t"
+ "vqrshrn.u16 d10, q7, #8 \n\t"
+ "vqrshrn.u16 d11, q3, #8 \n\t"
// Add to s
"vmul.u32 d8, d16, d8 \n\t"
"vmull.u8 q6, d4,d8 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 d4, d8,d0 \n\t"
"vmul.u32 d8, d8, d16 \n\t"
"vmull.u8 q6, d8,d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 d0, d0,d8 \n\t"
"vst1.32 d0[0], [%[d]] \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
"add %[pl], %[d], #32\n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
- "vshrn.u16 d10, q7, #8 \n\t"
- "vshrn.u16 d9, q2, #8 \n\t"
- "vshrn.u16 d11, q3, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d10, q7, #8 \n\t"
+ "vqrshrn.u16 d9, q2, #8 \n\t"
+ "vqrshrn.u16 d11, q3, #8 \n\t"
"pld [%[pl]]\n\t"
"cmp %[tmp], %[pl] \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"