/* Only need this for fixed-size arrays; for structs, just assign. */
#define vp8_copy(Dest, Src) \
- do { \
+ { \
assert(sizeof(Dest) == sizeof(Src)); \
memcpy(Dest, Src, sizeof(Src)); \
- } while (0)
+ }
/* Use this for variably-sized arrays. */
#define vp8_copy_array(Dest, Src, N) \
- do { \
+ { \
assert(sizeof(*(Dest)) == sizeof(*(Src))); \
memcpy(Dest, Src, (N) * sizeof(*(Src))); \
- } while (0)
+ }
#define vp8_zero(Dest) memset(&(Dest), 0, sizeof(Dest));
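/* Usage sketch (illustrative addition, not from the original source):
   vp8_copy relies on sizeof() seeing genuine array types, so both
   arguments must be fixed-size arrays visible in scope; a pointer would
   make sizeof() report the pointer size and trip the assert.
   vp8_copy_array exists for that case and takes an explicit element count.
*/
#if 0
static void copy_sketch(void) {
  int curr[16], prev[16];
  int *p = prev;
  vp8_copy(curr, prev);        /* OK: both operands are real arrays. */
  vp8_copy_array(curr, p, 16); /* OK: element count passed explicitly. */
  vp8_zero(curr);              /* Clears the whole array. */
}
#endif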
static const int32_t sinpi8sqrt2 = 35468;
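/* Derivation sketch (inferred from the constant's name, not stated in the
   source): 35468 == round(sin(pi/8) * sqrt(2) * 65536), i.e. the factor
   ~0.541196 in Q16 fixed point. A lane-wise multiply followed by a 16-bit
   shift, as in VP8_IDCT_1D_W below, approximates the real-valued product:
     (int16_t)(((int32_t)x * 35468) >> 16)  ~=  x * sin(pi/8) * sqrt(2)
   The companion const_cospi8sqrt2minus1_m plays the same role for
   cos(pi/8) * sqrt(2) - 1 (20091 in Q16).
*/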
#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 s4_m, s5_m, s6_m, s7_m; \
\
TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \
ILVR_D2_SH(s6_m, s4_m, s7_m, s5_m, out0, out2); \
out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \
out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \
- } while (0)
+ }
#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in) \
({ \
})
#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 a1_m, b1_m, c1_m, d1_m; \
v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
v8i16 const_cospi8sqrt2minus1_m; \
d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3); \
d1_m = d_tmp1_m + d_tmp2_m; \
BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
- } while (0)
+ }
#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v4i32 a1_m, b1_m, c1_m, d1_m; \
v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \
d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16; \
d1_m = d_tmp1_m + d_tmp2_m; \
BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
- } while (0)
+ }
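/* Scalar shape of one 1-D pass (a sketch reconstructed from the macro
   structure and the reference vp8 idct4x4llm code; the vector macros above
   compute the same values lane-wise):
     a1 = in0 + in2;
     b1 = in0 - in2;
     c1 = ((in1 * sinpi8sqrt2) >> 16) -
          (in3 + ((in3 * cospi8sqrt2minus1) >> 16));
     d1 = (in1 + ((in1 * cospi8sqrt2minus1) >> 16)) +
          ((in3 * sinpi8sqrt2) >> 16);
   after which BUTTERFLY_4(a1, b1, c1, d1, ...) forms out0 = a1 + d1,
   out1 = b1 + c1, out2 = b1 - c1 and out3 = a1 - d1.
*/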
static void idct4x4_addblk_msa(int16_t *input, uint8_t *pred,
int32_t pred_stride, uint8_t *dest,
#include "vp8/common/mips/msa/vp8_macros_msa.h"
#define VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) \
- do { \
+ { \
v16u8 p1_a_sub_q1, p0_a_sub_q0; \
\
p0_a_sub_q0 = __msa_asub_u_b(p0, q0); \
p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0); \
mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1); \
mask = ((v16u8)mask <= b_limit); \
- } while (0)
+ }
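/* What the mask computes (sketch, matching the VP8 simple-filter threshold
   |p0 - q0| * 2 + |p1 - q1| / 2 <= b_limit): asub takes the absolute
   difference, the saturating adds doubles |p0 - q0|, and the final compare
   leaves a lane at all-ones wherever the edge is flat enough to filter.
*/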
#define VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev) \
- do { \
+ { \
v16i8 p1_m, p0_m, q0_m, q1_m, filt, q0_sub_p0, t1, t2; \
const v16i8 cnst4b = __msa_ldi_b(4); \
const v16i8 cnst3b = __msa_ldi_b(3); \
q1 = __msa_xori_b((v16u8)q1_m, 0x80); \
p1_m = __msa_adds_s_b(p1_m, filt); \
p1 = __msa_xori_b((v16u8)p1_m, 0x80); \
- } while (0)
+ }
#define VP8_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) \
- do { \
+ { \
v16i8 p1_m, p0_m, q0_m, q1_m, filt, filt1, filt2; \
v16i8 q0_sub_p0; \
const v16i8 cnst4b = __msa_ldi_b(4); \
p0_m = __msa_adds_s_b(p0_m, filt2); \
q0_in = __msa_xori_b((v16u8)q0_m, 0x80); \
p0_in = __msa_xori_b((v16u8)p0_m, 0x80); \
- } while (0)
+ }
#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
- do { \
+ { \
v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
v16i8 u, filt, t1, t2, filt_sign, q0_sub_p0; \
v8i16 filt_r, filt_l, u_r, u_l; \
p0_m = __msa_adds_s_b(p0_m, u); \
q0 = __msa_xori_b((v16u8)q0_m, 0x80); \
p0 = __msa_xori_b((v16u8)p0_m, 0x80); \
- } while (0)
+ }
#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
limit_in, b_limit_in, thresh_in, hev_out, mask_out, \
flat_out) \
- do { \
+ { \
v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
\
mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \
mask_out = (limit_in) < (v16u8)mask_out; \
mask_out = __msa_xori_b(mask_out, 0xff); \
- } while (0)
+ }
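/* Mask polarity trick used above: (limit_in < mask_out) sets a lane to
   all-ones where the largest measured pixel difference exceeds the limit;
   xori with 0xff then inverts every bit, so mask_out ends up all-ones
   exactly where every difference is within the limit and the filter may
   run.
*/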
#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \
- do { \
+ { \
uint16_t tmp0_h; \
uint32_t tmp0_w; \
\
tmp0_h = __msa_copy_u_h((v8i16)in1, in1_idx); \
SW(tmp0_w, pdst); \
SH(tmp0_h, pdst + stride); \
- } while (0)
+ }
static void loop_filter_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
const uint8_t *b_limit0_ptr,
#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
mask2, filt0, filt1, filt2, out0, out1) \
- do { \
+ { \
v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \
\
VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \
VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
DPADD_SB2_SH(vec4_m, vec5_m, filt2, filt2, out0, out1); \
- } while (0)
+ }
#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
mask2, filt0, filt1, filt2, out0, out1, \
out2, out3) \
- do { \
+ { \
v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
\
VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \
out0, out1, out2, out3); \
DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt2, filt2, filt2, filt2, \
out0, out1, out2, out3); \
- } while (0)
+ }
#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \
({ \
})
#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
filt0, filt1, out0, out1) \
- do { \
+ { \
v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
\
VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \
VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \
- } while (0)
+ }
#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
filt0, filt1, out0, out1, out2, out3) \
- do { \
+ { \
v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
\
VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \
VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \
DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \
out0, out1, out2, out3); \
- } while (0)
+ }
static void common_hz_6t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride,
uint8_t *RESTRICT dst, int32_t dst_stride,
#endif // (__mips == 64)
#define SH(val, pdst) \
- do { \
+ { \
uint8_t *pdst_m = (uint8_t *)(pdst); \
const uint16_t val_m = (val); \
\
\
: [pdst_m] "=m"(*pdst_m) \
: [val_m] "r"(val_m)); \
- } while (0)
+ }
#define SW(val, pdst) \
- do { \
+ { \
uint8_t *pdst_m = (uint8_t *)(pdst); \
const uint32_t val_m = (val); \
\
\
: [pdst_m] "=m"(*pdst_m) \
: [val_m] "r"(val_m)); \
- } while (0)
+ }
#define SD(val, pdst) \
- do { \
+ { \
uint8_t *pdst_m = (uint8_t *)(pdst); \
const uint64_t val_m = (val); \
\
\
: [pdst_m] "=m"(*pdst_m) \
: [val_m] "r"(val_m)); \
- } while (0)
+ }
#else // !(__mips_isa_rev >= 6)
#define LW(psrc) \
({ \
})
#endif // (__mips == 64)
#define SH(val, pdst) \
- do { \
+ { \
uint8_t *pdst_m = (uint8_t *)(pdst); \
const uint16_t val_m = (val); \
\
\
: [pdst_m] "=m"(*pdst_m) \
: [val_m] "r"(val_m)); \
- } while (0)
+ }
#define SW(val, pdst) \
- do { \
+ { \
uint8_t *pdst_m = (uint8_t *)(pdst); \
const uint32_t val_m = (val); \
\
\
: [pdst_m] "=m"(*pdst_m) \
: [val_m] "r"(val_m)); \
- } while (0)
+ }
#define SD(val, pdst) \
- do { \
+ { \
uint8_t *pdst_m1 = (uint8_t *)(pdst); \
uint32_t val0_m, val1_m; \
\
\
SW(val0_m, pdst_m1); \
SW(val1_m, pdst_m1 + 4); \
- } while (0)
+ }
#endif // (__mips_isa_rev >= 6)
/* Description : Load 4 words with stride
Load word in 'out3' from (psrc + 3 * stride)
*/
#define LW4(psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
out0 = LW((psrc)); \
out1 = LW((psrc) + stride); \
out2 = LW((psrc) + 2 * stride); \
out3 = LW((psrc) + 3 * stride); \
- } while (0)
+ }
/* Description : Load double words with stride
Arguments : Inputs - psrc, stride
Load double word in 'out1' from (psrc + stride)
*/
#define LD2(psrc, stride, out0, out1) \
- do { \
+ { \
out0 = LD((psrc)); \
out1 = LD((psrc) + stride); \
- } while (0)
+ }
#define LD4(psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
LD2((psrc), stride, out0, out1); \
LD2((psrc) + 2 * stride, stride, out2, out3); \
- } while (0)
+ }
/* Description : Store 4 words with stride
Arguments : Inputs - in0, in1, in2, in3, pdst, stride
Store word from 'in3' to (pdst + 3 * stride)
*/
#define SW4(in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
SW(in0, (pdst)); \
SW(in1, (pdst) + stride); \
SW(in2, (pdst) + 2 * stride); \
SW(in3, (pdst) + 3 * stride); \
- } while (0)
+ }
/* Description : Store 4 double words with stride
Arguments : Inputs - in0, in1, in2, in3, pdst, stride
Store double word from 'in3' to (pdst + 3 * stride)
*/
#define SD4(in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
SD(in0, (pdst)); \
SD(in1, (pdst) + stride); \
SD(in2, (pdst) + 2 * stride); \
SD(in3, (pdst) + 3 * stride); \
- } while (0)
+ }
/* Description : Load vectors with 16 byte elements with stride
Arguments : Inputs - psrc, stride
Load 16 byte elements in 'out1' from (psrc + stride)
*/
#define LD_B2(RTYPE, psrc, stride, out0, out1) \
- do { \
+ { \
out0 = LD_B(RTYPE, (psrc)); \
out1 = LD_B(RTYPE, (psrc) + stride); \
- } while (0)
+ }
#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \
- do { \
+ { \
LD_B2(RTYPE, (psrc), stride, out0, out1); \
out2 = LD_B(RTYPE, (psrc) + 2 * stride); \
- } while (0)
+ }
#define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)
#define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
LD_B2(RTYPE, (psrc), stride, out0, out1); \
LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
- } while (0)
+ }
#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \
- do { \
+ { \
LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
out4 = LD_B(RTYPE, (psrc) + 4 * stride); \
- } while (0)
+ }
#define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__)
#define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
#define LD_B8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \
out7) \
- do { \
+ { \
LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \
- } while (0)
+ }
#define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)
#define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
/* Load 8 halfword elements in 'out1' from (psrc + stride)
*/
#define LD_H2(RTYPE, psrc, stride, out0, out1) \
- do { \
+ { \
out0 = LD_H(RTYPE, (psrc)); \
out1 = LD_H(RTYPE, (psrc) + (stride)); \
- } while (0)
+ }
#define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
LD_H2(RTYPE, (psrc), stride, out0, out1); \
LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
- } while (0)
+ }
#define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
/* Description : Load 2 vectors of signed word elements with stride
Return Type - signed word
*/
#define LD_SW2(psrc, stride, out0, out1) \
- do { \
+ { \
out0 = LD_SW((psrc)); \
out1 = LD_SW((psrc) + stride); \
- } while (0)
+ }
/* Description : Store vectors of 16 byte elements with stride
Arguments : Inputs - in0, in1, pdst, stride
Store 16 byte elements from 'in1' to (pdst + stride)
*/
#define ST_B2(RTYPE, in0, in1, pdst, stride) \
- do { \
+ { \
ST_B(RTYPE, in0, (pdst)); \
ST_B(RTYPE, in1, (pdst) + stride); \
- } while (0)
+ }
#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
ST_B2(RTYPE, in0, in1, (pdst), stride); \
ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
- } while (0)
+ }
#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
#define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
- do { \
+ { \
ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
- } while (0)
+ }
#define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__)
/* Description : Store vectors of 8 halfword elements with stride
Store 8 halfword elements from 'in1' to (pdst + stride)
*/
#define ST_H2(RTYPE, in0, in1, pdst, stride) \
- do { \
+ { \
ST_H(RTYPE, in0, (pdst)); \
ST_H(RTYPE, in1, (pdst) + stride); \
- } while (0)
+ }
#define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
/* Description : Store vectors of word elements with stride
Store 4 word elements from 'in1' to (pdst + stride)
*/
#define ST_SW2(in0, in1, pdst, stride) \
- do { \
+ { \
ST_SW(in0, (pdst)); \
ST_SW(in1, (pdst) + stride); \
- } while (0)
+ }
/* Description : Store 2x4 byte block to destination memory from input vector
Arguments : Inputs - in, stidx, pdst, stride
the GP register and stored to (pdst + 3 * stride)
*/
#define ST2x4_UB(in, stidx, pdst, stride) \
- do { \
+ { \
uint16_t out0_m, out1_m, out2_m, out3_m; \
uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \
\
SH(out1_m, pblk_2x4_m + stride); \
SH(out2_m, pblk_2x4_m + 2 * stride); \
SH(out3_m, pblk_2x4_m + 3 * stride); \
- } while (0)
+ }
/* Description : Store 4x4 byte block to destination memory from input vector
Arguments : Inputs - in0, in1, pdst, stride
GP register and stored to (pdst + 3 * stride)
*/
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
- do { \
+ { \
uint32_t out0_m, out1_m, out2_m, out3_m; \
uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \
\
out3_m = __msa_copy_u_w((v4i32)in1, idx3); \
\
SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \
- } while (0)
+ }
#define ST4x8_UB(in0, in1, pdst, stride) \
- do { \
+ { \
uint8_t *pblk_4x8 = (uint8_t *)(pdst); \
\
ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \
ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
- } while (0)
+ }
/* Description : Store 8x1 byte block to destination memory from input vector
Arguments : Inputs - in, pdst
GP register and stored to (pdst)
*/
#define ST8x1_UB(in, pdst) \
- do { \
+ { \
uint64_t out0_m; \
\
out0_m = __msa_copy_u_d((v2i64)in, 0); \
SD(out0_m, pdst); \
- } while (0)
+ }
/* Description : Store 8x2 byte block to destination memory from input vector
Arguments : Inputs - in, pdst, stride
GP register and stored to (pdst + stride)
*/
#define ST8x2_UB(in, pdst, stride) \
- do { \
+ { \
uint64_t out0_m, out1_m; \
uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \
\
\
SD(out0_m, pblk_8x2_m); \
SD(out1_m, pblk_8x2_m + stride); \
- } while (0)
+ }
/* Description : Store 8x4 byte block to destination memory from input
vectors
GP register and stored to (pdst + 3 * stride)
*/
#define ST8x4_UB(in0, in1, pdst, stride) \
- do { \
+ { \
uint64_t out0_m, out1_m, out2_m, out3_m; \
uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \
\
out3_m = __msa_copy_u_d((v2i64)in1, 1); \
\
SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \
- } while (0)
+ }
/* Description : Immediate number of elements to slide with zero
Arguments : Inputs - in0, in1, slide_val
value specified in the 'slide_val'
*/
#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) \
- do { \
+ { \
v16i8 zero_m = { 0 }; \
\
out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
- } while (0)
+ }
#define SLDI_B2_0_UB(...) SLDI_B2_0(v16u8, __VA_ARGS__)
/* Description : Immediate number of elements to slide
value specified in the 'slide_val'
*/
#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \
- do { \
+ { \
out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \
out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \
- } while (0)
+ }
#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, out0, out1, \
out2, slide_val) \
- do { \
+ { \
SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val); \
out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \
- } while (0)
+ }
#define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)
/* Description : Shuffle byte vector elements as per mask vector
'out0' as per control vector 'mask0'
*/
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
- } while (0)
+ }
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
out0, out1, out2) \
- do { \
+ { \
VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4); \
- } while (0)
+ }
#define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)
/* Description : Shuffle halfword vector elements as per mask vector
'out0' as per control vector 'mask0'
*/
#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \
- } while (0)
+ }
#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)
/* Description : Dot product of byte vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \
out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \
- } while (0)
+ }
#define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)
#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)
/* Description : Dot product of byte vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \
out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \
- } while (0)
+ }
#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)
/* Description : Dot product of halfword vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \
out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \
- } while (0)
+ }
#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
/* Description : Dot product of word vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \
out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \
- } while (0)
+ }
#define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
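/* Lane-wise shape shared by the DOTP_* macros above (sketch): each output
   lane is the widened product sum of one adjacent input pair,
     out[i] = mult[2 * i] * cnst[2 * i] + mult[2 * i + 1] * cnst[2 * i + 1];
   the DPADD_* variants below accumulate that sum into the existing
   contents of 'out' instead of overwriting them.
*/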
/* Description : Dot product & addition of byte vector elements
are added to the 'out0' vector
*/
#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \
out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \
- } while (0)
+ }
#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)
#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)
/* Description : Dot product & addition of halfword vector elements
are added to the 'out0' vector
*/
#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
- } while (0)
+ }
#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__)
/* Description : Dot product & addition of double word vector elements
are added to the 'out0' vector
*/
#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
- } while (0)
+ }
#define DPADD_SD2_SD(...) DPADD_SD2(v2i64, __VA_ARGS__)
/* Description : Clips all signed halfword elements of input vector */
out_m; \
})
#define CLIP_SH2_0_255(in0, in1) \
- do { \
+ { \
in0 = CLIP_SH_0_255(in0); \
in1 = CLIP_SH_0_255(in1); \
- } while (0)
+ }
#define CLIP_SH4_0_255(in0, in1, in2, in3) \
- do { \
+ { \
CLIP_SH2_0_255(in0, in1); \
CLIP_SH2_0_255(in2, in3); \
- } while (0)
+ }
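/* Per-lane equivalent of the clip above (sketch): each signed halfword is
   clamped to the valid pixel range,
     out = in < 0 ? 0 : (in > 255 ? 255 : in);
   applied to 16-bit intermediate sums just before they are packed back
   down to 8-bit pixels.
*/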
/* Description : Clips all signed word elements of input vector
between 0 & 255
halfword result is written to 'out0'
*/
#define HADD_UB2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \
out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \
- } while (0)
+ }
#define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)
/* Description : Horizontal subtraction of unsigned byte vector elements
halfword result is written to 'out0'
*/
#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
- } while (0)
+ }
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
/* Description : Horizontal subtraction of signed halfword vector elements
word result is written to 'out0'
*/
#define HSUB_UH2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \
out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \
- } while (0)
+ }
#define HSUB_UH2_SW(...) HSUB_UH2(v4i32, __VA_ARGS__)
/* Description : Set element n input vector to GPR value
Details : Set element 0 in vector 'out' to value specified in 'in0'
*/
#define INSERT_D2(RTYPE, in0, in1, out) \
- do { \
+ { \
out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
- } while (0)
+ }
#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
/* Description : Interleave even byte elements from vectors
and written to 'out0'
*/
#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
- } while (0)
+ }
#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
#define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
/* and written to 'out0'
*/
#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
- } while (0)
+ }
#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
/* and written to 'out0'
*/
#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
- } while (0)
+ }
#define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
/* Description : Interleave even double word elements from vectors
and written to 'out0'
*/
#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \
out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \
- } while (0)
+ }
#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
/* Description : Interleave left half of byte elements from vectors
and written to 'out0'.
*/
#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \
- } while (0)
+ }
#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)
/* interleaved and written to 'out0'.
*/
#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
#define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)
/* and written to 'out0'.
*/
#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \
- } while (0)
+ }
#define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
/* Description : Interleave right half of byte elements from vectors
and written to 'out0'.
*/
#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \
- } while (0)
+ }
#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
/* interleaved and written to 'out0'.
*/
#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \
- } while (0)
+ }
#define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
/* Description : Interleave right half of double word elements from vectors
interleaved and written to 'out0'.
*/
#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \
out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \
- } while (0)
+ }
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
/* interleaved and written to 'out0'
*/
#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
- } while (0)
+ }
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
- } while (0)
+ }
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
- } while (0)
+ }
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
/* 'max_val' are written in place
*/
#define MAXI_SH2(RTYPE, in0, in1, max_val) \
- do { \
+ { \
in0 = (RTYPE)__msa_maxi_s_h((v8i16)in0, (max_val)); \
in1 = (RTYPE)__msa_maxi_s_h((v8i16)in1, (max_val)); \
- } while (0)
+ }
#define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__)
/* Description : Saturate the halfword element values to the max
The results are written in place
*/
#define SAT_UH2(RTYPE, in0, in1, sat_val) \
- do { \
+ { \
in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \
in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \
- } while (0)
+ }
#define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__)
/* Description : Saturate the halfword element values to the max
The results are written in place
*/
#define SAT_SH2(RTYPE, in0, in1, sat_val) \
- do { \
+ { \
in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \
in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \
- } while (0)
+ }
#define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
- do { \
+ { \
SAT_SH2(RTYPE, in0, in1, sat_val); \
SAT_SH2(RTYPE, in2, in3, sat_val); \
- } while (0)
+ }
#define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
/* Description : Indexed halfword element values are replicated to all
Valid index range for halfword operation is 0-7
*/
#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \
out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \
- } while (0)
+ }
#define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
#define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, out0, out1, out2) \
- do { \
+ { \
SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
out2 = (RTYPE)__msa_splati_h((v8i16)in, idx2); \
- } while (0)
+ }
#define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
#define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)
/* Valid index range for word operation is 0-3
*/
#define SPLATI_W2(RTYPE, in, stidx, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_splati_w((v4i32)in, stidx); \
out1 = (RTYPE)__msa_splati_w((v4i32)in, (stidx + 1)); \
- } while (0)
+ }
#define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__)
/* Description : Pack even byte elements of vector pairs
half of 'out0'.
*/
#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \
- } while (0)
+ }
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
/* right half of 'out0'.
*/
#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
/* Description : Pack even double word elements of vector pairs
half of 'out0'.
*/
#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \
out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \
- } while (0)
+ }
#define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
#define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
/* the right half of 'out0'.
*/
#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckod_d((v2i64)in0, (v2i64)in1); \
out1 = (RTYPE)__msa_pckod_d((v2i64)in2, (v2i64)in3); \
- } while (0)
+ }
#define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
#define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
/* logically xor'ed with 128 and the result is stored in-place.
*/
#define XORI_B2_128(RTYPE, in0, in1) \
- do { \
+ { \
in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \
in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \
- } while (0)
+ }
#define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
#define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
#define XORI_B3_128(RTYPE, in0, in1, in2) \
- do { \
+ { \
XORI_B2_128(RTYPE, in0, in1); \
in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \
- } while (0)
+ }
#define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
- do { \
+ { \
XORI_B2_128(RTYPE, in0, in1); \
XORI_B2_128(RTYPE, in2, in3); \
- } while (0)
+ }
#define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
#define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \
- do { \
+ { \
XORI_B3_128(RTYPE, in0, in1, in2); \
XORI_B2_128(RTYPE, in3, in4); \
- } while (0)
+ }
#define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)
#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \
- do { \
+ { \
XORI_B4_128(RTYPE, in0, in1, in2, in3); \
XORI_B4_128(RTYPE, in4, in5, in6, in7); \
- } while (0)
+ }
#define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
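/* Why xor with 128: flipping the top bit of each byte maps unsigned pixels
   [0, 255] onto signed values [-128, 127], so the filter arithmetic can
   run in the signed domain; a second XORI_B*_128 (or the explicit 0x80
   xors in the loop-filter macros earlier) maps the results back.
*/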
/* Description : Shift left all elements of vector (generic for all data types)
the result is written in-place.
*/
#define SLLI_4V(in0, in1, in2, in3, shift) \
- do { \
+ { \
in0 = in0 << shift; \
in1 = in1 << shift; \
in2 = in2 << shift; \
in3 = in3 << shift; \
- } while (0)
+ }
/* Description : Arithmetic shift right all elements of vector
(generic for all data types)
the result is written in-place. 'shift' is a GP variable.
*/
#define SRA_4V(in0, in1, in2, in3, shift) \
- do { \
+ { \
in0 = in0 >> shift; \
in1 = in1 >> shift; \
in2 = in2 >> shift; \
in3 = in3 >> shift; \
- } while (0)
+ }
/* Description : Shift right arithmetic rounded words
Arguments : Inputs - in0, in1, shift
'shift' is a vector.
*/
#define SRAR_W2(RTYPE, in0, in1, shift) \
- do { \
+ { \
in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \
in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \
- } while (0)
+ }
#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
- do { \
+ { \
SRAR_W2(RTYPE, in0, in1, shift); \
SRAR_W2(RTYPE, in2, in3, shift); \
- } while (0)
+ }
#define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
/* Description : Shift right arithmetic rounded (immediate)
'shift' is an immediate value.
*/
#define SRARI_H2(RTYPE, in0, in1, shift) \
- do { \
+ { \
in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \
in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \
- } while (0)
+ }
#define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
#define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
- do { \
+ { \
SRARI_H2(RTYPE, in0, in1, shift); \
SRARI_H2(RTYPE, in2, in3, shift); \
- } while (0)
+ }
#define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
#define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
#define SRARI_W2(RTYPE, in0, in1, shift) \
- do { \
+ { \
in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \
in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \
- } while (0)
+ }
#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
- do { \
+ { \
SRARI_W2(RTYPE, in0, in1, shift); \
SRARI_W2(RTYPE, in2, in3, shift); \
- } while (0)
+ }
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
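/* Per-lane equivalent of the rounded shifts above (sketch):
     out = (in + (1 << (shift - 1))) >> shift;
   an arithmetic shift with round-to-nearest, which keeps the
   DCT_CONST_BITS descaling used by the transform code unbiased.
*/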
/* Description : Multiplication of pairs of vectors
and the result is written to 'out0'
*/
#define MUL2(in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = in0 * in1; \
out1 = in2 * in3; \
- } while (0)
+ }
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
- do { \
+ { \
MUL2(in0, in1, in2, in3, out0, out1); \
MUL2(in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
/* Description : Addition of 2 pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
to 'out0'.
*/
#define ADD2(in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = in0 + in1; \
out1 = in2 + in3; \
- } while (0)
+ }
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
- do { \
+ { \
ADD2(in0, in1, in2, in3, out0, out1); \
ADD2(in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
/* Description : Subtraction of 2 pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
written to 'out0'.
*/
#define SUB2(in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = in0 - in1; \
out1 = in2 - in3; \
- } while (0)
+ }
#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
- do { \
+ { \
out0 = in0 - in1; \
out1 = in2 - in3; \
out2 = in4 - in5; \
out3 = in6 - in7; \
- } while (0)
+ }
/* Description : Sign extend halfword elements from right half of the vector
Arguments : Input - in (halfword vector)
4 word elements keeping sign intact
*/
#define UNPCK_R_SH_SW(in, out) \
- do { \
+ { \
v8i16 sign_m; \
\
sign_m = __msa_clti_s_h((v8i16)in, 0); \
out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \
- } while (0)
+ }
/* Description : Zero extend unsigned byte elements to halfword elements
Arguments : Input - in (unsigned byte vector)
Zero extended left half of vector is returned in 'out1'
*/
#define UNPCK_UB_SH(in, out0, out1) \
- do { \
+ { \
v16i8 zero_m = { 0 }; \
\
ILVRL_B2_SH(zero_m, in, out0, out1); \
- } while (0)
+ }
/* Description : Sign extend halfword elements from input vector and return
the result in pair of vectors
generate 4 signed word elements in 'out1'
*/
#define UNPCK_SH_SW(in, out0, out1) \
- do { \
+ { \
v8i16 tmp_m; \
\
tmp_m = __msa_clti_s_h((v8i16)in, 0); \
ILVRL_H2_SW(tmp_m, in, out0, out1); \
- } while (0)
+ }
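/* How the sign extension above works: __msa_clti_s_h(in, 0) yields
   all-ones (0xffff) in negative lanes and zero elsewhere -- exactly the
   high halfword of a sign-extended 32-bit value -- so interleaving that
   mask with the input widens each int16_t lane the same way a scalar
   (int32_t) cast would.
*/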
/* Description : Butterfly of 4 input vectors
Arguments : Inputs - in0, in1, in2, in3
Details : Butterfly operation
*/
#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
out0 = in0 + in3; \
out1 = in1 + in2; \
\
out2 = in1 - in2; \
out3 = in0 - in3; \
- } while (0)
+ }
/* Description : Transpose input 8x8 byte block
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
*/
#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \
out1, out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
\
ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \
SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \
SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \
- } while (0)
+ }
#define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
/* Description : Transpose 16x4 block into 4x16 with byte elements in vectors
*/
#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
in10, in11, in12, in13, in14, in15, out0, out1, \
out2, out3) \
- do { \
+ { \
v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
\
ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \
tmp1_m = (v2i64)__msa_ilvod_b((v16i8)tmp3_m, (v16i8)tmp2_m); \
out1 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
out3 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \
- } while (0)
+ }
/* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
*/
#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
in10, in11, in12, in13, in14, in15, out0, out1, \
out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
\
tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \
out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
- } while (0)
+ }
/* Description : Transpose 4x4 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3
Return Type - signed halfword
*/
#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 s0_m, s1_m; \
\
ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \
- } while (0)
+ }
/* Description : Transpose 8x4 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
Return Type - signed halfword
*/
#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
\
ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \
ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \
ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \
ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \
- } while (0)
+ }
/* Description : Transpose 4x4 block with word elements in vectors
Arguments : Inputs - in0, in1, in2, in3
Return Type - signed word
*/
#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v4i32 s0_m, s1_m, s2_m, s3_m; \
\
ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \
out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \
out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \
- } while (0)
+ }
/* Description : Dot product and addition of 3 signed halfword input vectors
Arguments : Inputs - in0, in1, in2, coeff0, coeff1, coeff2
Arguments : Inputs - in0, in1, pdst
*/
#define PCKEV_ST_SB(in0, in1, pdst) \
- do { \
+ { \
v16i8 tmp_m; \
tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \
ST_SB(tmp_m, (pdst)); \
- } while (0)
+ }
/* Description : Horizontal 2 tap filter kernel code
Arguments : Inputs - in0, in1, mask, coeff, shift
*/
}
#define CHECK_BOUNDS(range) \
- do { \
+ { \
all_in = 1; \
all_in &= ((br - range) >= x->mv_row_min); \
all_in &= ((br + range) <= x->mv_row_max); \
all_in &= ((bc - range) >= x->mv_col_min); \
all_in &= ((bc + range) <= x->mv_col_max); \
- } while (0)
+ }
#define CHECK_POINT \
- do { \
+ { \
if (this_mv.as_mv.col < x->mv_col_min) continue; \
if (this_mv.as_mv.col > x->mv_col_max) continue; \
if (this_mv.as_mv.row < x->mv_row_min) continue; \
if (this_mv.as_mv.row > x->mv_row_max) continue; \
- } while (0)
+ }
#define CHECK_BETTER \
- do { \
+ { \
if (thissad < bestsad) { \
thissad += \
mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); \
best_site = i; \
} \
} \
- } while (0)
+ }
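/* Why the trailing semicolons also disappear at the call sites below:
   once a macro expands to a plain { ... } block, writing CHECK_BOUNDS(2);
   leaves a stray empty statement after the block. At statement scope that
   is merely untidy, but in an un-braced if/else it breaks the syntax
   (sketch with hypothetical names):
*/
#if 0
if (some_condition)
  CHECK_BOUNDS(2); /* expands to { ... }; -- the ';' terminates the if... */
else               /* ...so this 'else' has nothing to attach to and the
                      code fails to compile. Dropping the semicolon at the
                      call site avoids the problem. */
  do_something_else(); /* hypothetical */
#endif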
static const MV next_chkpts[6][3] = {
{ { -2, 0 }, { -1, -2 }, { 1, -2 } }, { { -1, -2 }, { 1, -2 }, { 2, 0 } },
#endif
/* hex search */
- CHECK_BOUNDS(2);
+ CHECK_BOUNDS(2)
if (all_in) {
for (i = 0; i < 6; ++i) {
this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < 6; ++i) {
this_mv.as_mv.row = br + hex[i].row;
this_mv.as_mv.col = bc + hex[i].col;
- CHECK_POINT;
+ CHECK_POINT
this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
for (j = 1; j < hex_range; ++j) {
best_site = -1;
- CHECK_BOUNDS(2);
+ CHECK_BOUNDS(2)
if (all_in) {
for (i = 0; i < 3; ++i) {
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < 3; ++i) {
this_mv.as_mv.row = br + next_chkpts[k][i].row;
this_mv.as_mv.col = bc + next_chkpts[k][i].col;
- CHECK_POINT;
+ CHECK_POINT
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
cal_neighbors:
for (j = 0; j < dia_range; ++j) {
best_site = -1;
- CHECK_BOUNDS(1);
+ CHECK_BOUNDS(1)
if (all_in) {
for (i = 0; i < 4; ++i) {
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < 4; ++i) {
this_mv.as_mv.row = br + neighbors[i].row;
this_mv.as_mv.col = bc + neighbors[i].col;
- CHECK_POINT;
+ CHECK_POINT
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
this_mv.as_mv.col;
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
#include "vp8/common/mips/msa/vp8_macros_msa.h"
#define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 s0_m, s1_m, tp0_m, tp1_m, tp2_m, tp3_m; \
\
ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
ILVRL_H2_SH(s1_m, s0_m, tp2_m, tp3_m); \
PCKEV_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out0, out2); \
PCKOD_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out1, out3); \
- } while (0)
+ }
#define SET_DOTP_VALUES(coeff, val0, val1, val2, const1, const2) \
- do { \
+ { \
v8i16 tmp0_m; \
\
SPLATI_H3_SH(coeff, val0, val1, val2, tmp0_m, const1, const2); \
ILVEV_H2_SH(tmp0_m, const1, const2, tmp0_m, const1, const2); \
- } while (0)
+ }
#define RET_1_IF_NZERO_H(in0) \
({ \
})
mod3_w = diff1_l & mod3_w;
MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w,
filter_wt, mod0_w, mod1_w, mod2_w, mod3_w);
- PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+ PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h)
ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
ST_SH2(mod0_h, mod1_h, cnt, 8);
cnt += 16;
// Only need this for fixed-size arrays; for structs, just assign.
#define vp9_copy(dest, src) \
- do { \
+ { \
assert(sizeof(dest) == sizeof(src)); \
memcpy(dest, src, sizeof(src)); \
- } while (0)
+ }
// Use this for variably-sized arrays.
#define vp9_copy_array(dest, src, n) \
- do { \
+ { \
assert(sizeof(*(dest)) == sizeof(*(src))); \
memcpy(dest, src, (n) * sizeof(*(src))); \
- } while (0)
+ }
#define vp9_zero(dest) memset(&(dest), 0, sizeof(dest))
#define vp9_zero_array(dest, n) memset(dest, 0, (n) * sizeof(*(dest)))
#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
out1 = -out1; \
out3 = -out3; \
out5 = -out5; \
- } while (0)
+ }
#define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
\
SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \
PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
out0, out1, out2, out3); \
- } while (0)
+ }
#endif // VPX_VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_
#endif
#define FIRST_LEVEL_CHECKS \
- do { \
+ { \
unsigned int left, right, up, down, diag; \
CHECK_BETTER(left, tr, tc - hstep); \
CHECK_BETTER(right, tr, tc + hstep); \
case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
} \
- } while (0)
+ }
#define SECOND_LEVEL_CHECKS \
- do { \
+ { \
int kr, kc; \
unsigned int second; \
if (tr != br && tc != bc) { \
case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
} \
} \
- } while (0)
+ }
#define SETUP_SUBPEL_SEARCH \
const uint8_t *const z = x->plane[0].src.buf; \
}
#define CHECK_BETTER \
- do { \
+ { \
if (thissad < bestsad) { \
if (use_mvcost) \
thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \
best_site = i; \
} \
} \
- } while (0)
+ }
#define MAX_PATTERN_SCALES 11
#define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[t]; i++) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
if (best_site == -1) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[t]; i++) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
if (best_site == -1) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
cost_list[i + 1] = thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
cost_list[i + 1] = thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
cost_list[next_chkpts_indices[i] + 1] = thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
cost_list[next_chkpts_indices[i] + 1] = thissad =
vfp->sdf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv), in_what->stride);
- CHECK_BETTER;
+ CHECK_BETTER
}
}
#define VPX_TRANSPOSE8x16_UB_UB( \
in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, out4, \
out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15) \
- do { \
+ { \
v8i16 temp0, temp1, temp2, temp3, temp4; \
v8i16 temp5, temp6, temp7, temp8, temp9; \
\
out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \
out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \
- } while (0)
+ }
#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
ref, out) \
- do { \
+ { \
v16u8 temp0, temp1; \
\
temp1 = __msa_aver_u_b(above2_in, above1_in); \
temp1 = (temp1 < ref); \
temp0 = temp0 & temp1; \
out = __msa_bmz_v(out, src_in, temp0); \
- } while (0)
+ }
#define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
in10, in11, in12, in13, in14, in15) \
- do { \
+ { \
v8i16 temp0, temp1, temp2, temp3, temp4; \
v8i16 temp5, temp6, temp7, temp8, temp9; \
\
ILVL_W2_SH(temp7, temp6, temp9, temp8, temp2, temp3); \
in10 = (v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2); \
in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2); \
- } while (0)
+ }
#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
in9, in10, in11) \
- do { \
+ { \
v8i16 temp0, temp1, temp2, temp3; \
v8i16 temp4, temp5, temp6, temp7; \
\
in7 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp3); \
in9 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp6); \
in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7); \
- } while (0)
+ }
static void postproc_down_across_chroma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
int32_t src_stride,
SUB2(in9, vec2, in14, vec5, vec2, vec5);
DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
- SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);
+ SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5)
DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
ST_SH(temp0, out + 80);
#include "vpx_dsp/txfm_common.h"
#define VP9_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m; \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m; \
v4i32 vec4_m, vec5_m, vec6_m, vec7_m; \
SRARI_W4_SW(vec4_m, vec5_m, vec6_m, vec7_m, DCT_CONST_BITS); \
PCKEV_H4_SH(vec4_m, vec4_m, vec5_m, vec5_m, vec6_m, vec6_m, vec7_m, \
vec7_m, out0, out2, out1, out3); \
- } while (0)
+ }
#define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) \
- do { \
+ { \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
\
SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15); \
in2, in3); \
AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7, in4, in5, \
in6, in7); \
- } while (0)
+ }
#define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \
v8i16 s7_m, x0_m, x1_m, x2_m, x3_m; \
v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
x3_m = -x3_m; \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
- } while (0)
+ }
#define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \
v8i16 x0_m, x1_m, x2_m, x3_m; \
v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
x3_m = -x3_m; \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
- } while (0)
+ }
#define FDCT8x16_ODD(input0, input1, input2, input3, input4, input5, input6, \
input7, out1, out3, out5, out7, out9, out11, out13, \
out15) \
- do { \
+ { \
v8i16 stp21_m, stp22_m, stp23_m, stp24_m, stp25_m, stp26_m; \
v8i16 stp30_m, stp31_m, stp32_m, stp33_m, stp34_m, stp35_m; \
v8i16 stp36_m, stp37_m, vec0_m, vec1_m; \
cnst1_m = __msa_splati_h(coeff2_m, 3); \
cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \
out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
- } while (0)
+ }
#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) \
- do { \
+ { \
v8i16 tp0_m, tp1_m; \
v8i16 one_m = __msa_ldi_h(1); \
\
vec1 += tp1_m; \
vec0 >>= 2; \
vec1 >>= 2; \
- } while (0)
+ }
#define FDCT32_POSTPROC_NEG_W(vec) \
- do { \
+ { \
v4i32 temp_m; \
v4i32 one_m = __msa_ldi_w(1); \
\
temp_m = one_m & temp_m; \
vec += temp_m; \
vec >>= 2; \
- } while (0)
+ }
#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) \
- do { \
+ { \
v8i16 tp0_m, tp1_m; \
v8i16 one = __msa_ldi_h(1); \
\
vec1 += tp1_m; \
vec0 >>= 2; \
vec1 >>= 2; \
- } while (0)
+ }
#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right, reg1_right, \
const0, const1, out0, out1, out2, out3) \
- do { \
+ { \
v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \
v2i64 tp0_m, tp1_m, tp2_m, tp3_m; \
v4i32 k0_m = __msa_fill_w((int32_t)const0); \
tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \
out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m); \
out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m); \
- } while (0)
+ }
void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
int32_t src_stride);
#include "vpx_dsp/mips/macros_msa.h"
#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \
- do { \
+ { \
out0 = __msa_subs_u_h(out0, in0); \
out1 = __msa_subs_u_h(out1, in1); \
- } while (0)
+ }
static void intra_predict_vert_4x4_msa(const uint8_t *src, uint8_t *dst,
int32_t dst_stride) {
#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
out1 = -out1; \
out3 = -out3; \
out5 = -out5; \
- } while (0)
+ }
#define VP9_SET_COSPI_PAIR(c0_h, c1_h) \
({ \
})
#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \
- do { \
+ { \
uint8_t *dst_m = (uint8_t *)(dst); \
v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
v16i8 tmp0_m, tmp1_m; \
CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m); \
PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m); \
ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride); \
- } while (0)
+ }
#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 c0_m, c1_m, c2_m, c3_m; \
v8i16 step0_m, step1_m; \
v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8); \
BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m, (v8i16)tmp2_m, (v8i16)tmp3_m, \
out0, out1, out2, out3); \
- } while (0)
+ }
#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 res0_m, res1_m, c0_m, c1_m; \
v8i16 k1_m, k2_m, k3_m, k4_m; \
v8i16 zero_m = { 0 }; \
SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS); \
PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1); \
PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3); \
- } while (0)
+ }
#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \
({ \
})
/* multiply and add macro */
#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \
out2, out3) \
- do { \
+ { \
v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
v4i32 tmp0_madd, tmp1_madd, tmp2_madd, tmp3_madd; \
\
cst3, tmp0_madd, tmp1_madd, tmp2_madd, tmp3_madd); \
SRARI_W4_SW(tmp0_madd, tmp1_madd, tmp2_madd, tmp3_madd, DCT_CONST_BITS); \
PCKEV_H2_SH(tmp1_madd, tmp0_madd, tmp3_madd, tmp2_madd, out2, out3); \
- } while (0)
+ }
/* idct 8x8 macro */
#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \
v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m; \
v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \
BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, out0, \
out1, out2, out3, out4, out5, out6, out7); \
- } while (0)
+ }
#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m; \
v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m; \
v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1; \
out3 = -in3; \
out5 = -in5; \
out7 = -in7; \
- } while (0)
+ }
#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \
r12, r13, r14, r15, out0, out1, out2, out3, out4, \
out5, out6, out7, out8, out9, out10, out11, out12, \
out13, out14, out15) \
- do { \
+ { \
v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m; \
v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m; \
v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m; \
MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7); \
MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11); \
MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \
- } while (0)
+ }
void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
int32_t dst_stride);
#define VP9_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask, hev, p1_out, \
p0_out, q0_out, q1_out) \
- do { \
+ { \
v16i8 p1_m, p0_m, q0_m, q1_m, filt, q0_sub_p0, t1, t2; \
const v16i8 cnst4b = __msa_ldi_b(4); \
const v16i8 cnst3b = __msa_ldi_b(3); \
q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \
p1_m = __msa_adds_s_b(p1_m, filt); \
p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \
- } while (0)
+ }
#define VP9_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \
- do { \
+ { \
v16u8 tmp_flat4, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0; \
v16u8 zero_in = { 0 }; \
\
flat_out = (tmp_flat4 < (v16u8)flat_out); \
flat_out = __msa_xori_b(flat_out, 0xff); \
flat_out = flat_out & (mask); \
- } while (0)
+ }
#define VP9_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \
q6_in, q7_in, flat_in, flat2_out) \
- do { \
+ { \
v16u8 tmp_flat5, zero_in = { 0 }; \
v16u8 p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0; \
v16u8 p6_a_sub_p0, q6_a_sub_q0, p7_a_sub_p0, q7_a_sub_q0; \
flat2_out = (tmp_flat5 < (v16u8)flat2_out); \
flat2_out = __msa_xori_b(flat2_out, 0xff); \
flat2_out = flat2_out & flat_in; \
- } while (0)
+ }
#define VP9_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
p2_filt8_out, p1_filt8_out, p0_filt8_out, q0_filt8_out, \
q1_filt8_out, q2_filt8_out) \
- do { \
+ { \
v8u16 tmp_filt8_0, tmp_filt8_1, tmp_filt8_2; \
\
tmp_filt8_2 = p2_in + p1_in + p0_in; \
tmp_filt8_0 = q1_in + q3_in; \
tmp_filt8_1 = tmp_filt8_0 + tmp_filt8_1; \
q1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp_filt8_1, 3); \
- } while (0)
+ }
#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \
limit_in, b_limit_in, thresh_in, hev_out, mask_out, \
flat_out) \
- do { \
+ { \
v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
\
\
mask_out = limit_in < (v16u8)mask_out; \
mask_out = __msa_xori_b(mask_out, 0xff); \
- } while (0)
+ }
#endif // VPX_VPX_DSP_MIPS_LOOPFILTER_MSA_H_
#endif // (__mips == 64)
#define SH(val, pdst) \
- do { \
+ { \
uint8_t *pdst_sh_m = (uint8_t *)(pdst); \
const uint16_t val_sh_m = (val); \
\
\
: [pdst_sh_m] "=m"(*pdst_sh_m) \
: [val_sh_m] "r"(val_sh_m)); \
- } while (0)
+ }
#define SW(val, pdst) \
- do { \
+ { \
uint8_t *pdst_sw_m = (uint8_t *)(pdst); \
const uint32_t val_sw_m = (val); \
\
\
: [pdst_sw_m] "=m"(*pdst_sw_m) \
: [val_sw_m] "r"(val_sw_m)); \
- } while (0)
+ }
#define SD(val, pdst) \
- do { \
+ { \
uint8_t *pdst_sd_m = (uint8_t *)(pdst); \
uint32_t val0_sd_m, val1_sd_m; \
\
\
SW(val0_sd_m, pdst_sd_m); \
SW(val1_sd_m, pdst_sd_m + 4); \
- } while (0)
+ }
#endif // (__mips_isa_rev >= 6)
/* Description : Load 4 words with stride
Load word in 'out3' from (psrc + 3 * stride)
*/
#define LW4(psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
out0 = LW((psrc)); \
out1 = LW((psrc) + stride); \
out2 = LW((psrc) + 2 * stride); \
out3 = LW((psrc) + 3 * stride); \
- } while (0)
+ }
/* Description : Load double words with stride
Arguments : Inputs - psrc, stride
Load double word in 'out1' from (psrc + stride)
*/
#define LD2(psrc, stride, out0, out1) \
- do { \
+ { \
out0 = LD((psrc)); \
out1 = LD((psrc) + stride); \
- } while (0)
+ }
#define LD4(psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
LD2((psrc), stride, out0, out1); \
LD2((psrc) + 2 * stride, stride, out2, out3); \
- } while (0)
+ }
/* Description : Store 4 words with stride
Arguments : Inputs - in0, in1, in2, in3, pdst, stride
Store word from 'in3' to (pdst + 3 * stride)
*/
#define SW4(in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
SW(in0, (pdst)); \
SW(in1, (pdst) + stride); \
SW(in2, (pdst) + 2 * stride); \
SW(in3, (pdst) + 3 * stride); \
- } while (0)
+ }
/* Description : Store 4 double words with stride
Arguments : Inputs - in0, in1, in2, in3, pdst, stride
Store double word from 'in3' to (pdst + 3 * stride)
*/
#define SD4(in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
SD(in0, (pdst)); \
SD(in1, (pdst) + stride); \
SD(in2, (pdst) + 2 * stride); \
SD(in3, (pdst) + 3 * stride); \
- } while (0)
+ }
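/* Usage sketch (hypothetical helper, not part of this change): copying a
 * 4x4 block of 32-bit words with the strided load/store macros above;
 * assumes the scalar LW/SW accessors defined earlier in this header. */
static void copy_w4x4(const uint8_t *src, uint8_t *dst, int32_t stride) {
  uint32_t w0, w1, w2, w3;

  LW4(src, stride, w0, w1, w2, w3);  /* gather four strided words */
  SW4(w0, w1, w2, w3, dst, stride);  /* scatter them to the destination */
}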
/* Description : Load vector elements with stride
Arguments : Inputs - psrc, stride
Load 16 byte elements in 'out1' from (psrc + stride)
*/
#define LD_V2(RTYPE, psrc, stride, out0, out1) \
- do { \
+ { \
out0 = LD_V(RTYPE, (psrc)); \
out1 = LD_V(RTYPE, (psrc) + stride); \
- } while (0)
+ }
#define LD_UB2(...) LD_V2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_V2(v16i8, __VA_ARGS__)
#define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
#define LD_SW2(...) LD_V2(v4i32, __VA_ARGS__)
#define LD_V3(RTYPE, psrc, stride, out0, out1, out2) \
- do { \
+ { \
LD_V2(RTYPE, (psrc), stride, out0, out1); \
out2 = LD_V(RTYPE, (psrc) + 2 * stride); \
- } while (0)
+ }
#define LD_UB3(...) LD_V3(v16u8, __VA_ARGS__)
#define LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3) \
- do { \
+ { \
LD_V2(RTYPE, (psrc), stride, out0, out1); \
LD_V2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
- } while (0)
+ }
#define LD_UB4(...) LD_V4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_V4(v16i8, __VA_ARGS__)
#define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)
#define LD_V5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \
- do { \
+ { \
LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
out4 = LD_V(RTYPE, (psrc) + 4 * stride); \
- } while (0)
+ }
#define LD_UB5(...) LD_V5(v16u8, __VA_ARGS__)
#define LD_SB5(...) LD_V5(v16i8, __VA_ARGS__)
#define LD_V7(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6) \
- do { \
+ { \
LD_V5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4); \
LD_V2(RTYPE, (psrc) + 5 * stride, stride, out5, out6); \
- } while (0)
+ }
#define LD_SB7(...) LD_V7(v16i8, __VA_ARGS__)
#define LD_V8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \
out7) \
- do { \
+ { \
LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
LD_V4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \
- } while (0)
+ }
#define LD_UB8(...) LD_V8(v16u8, __VA_ARGS__)
#define LD_SB8(...) LD_V8(v16i8, __VA_ARGS__)
#define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)
#define LD_V16(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \
out7, out8, out9, out10, out11, out12, out13, out14, out15) \
- do { \
+ { \
LD_V8(RTYPE, (psrc), stride, out0, out1, out2, out3, out4, out5, out6, \
out7); \
LD_V8(RTYPE, (psrc) + 8 * stride, stride, out8, out9, out10, out11, out12, \
out13, out14, out15); \
- } while (0)
+ }
#define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)
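/* Usage sketch (hypothetical, for illustration only): load eight rows of
 * unsigned bytes and fold them into a single row by pairwise rounded
 * averages, the kind of access pattern the filter kernels below build on. */
static v16u8 average_8_rows(const uint8_t *src, int32_t stride) {
  v16u8 r0, r1, r2, r3, r4, r5, r6, r7;

  LD_UB8(src, stride, r0, r1, r2, r3, r4, r5, r6, r7);
  r0 = __msa_aver_u_b(r0, r1);
  r2 = __msa_aver_u_b(r2, r3);
  r4 = __msa_aver_u_b(r4, r5);
  r6 = __msa_aver_u_b(r6, r7);
  r0 = __msa_aver_u_b(r0, r2);
  r4 = __msa_aver_u_b(r4, r6);
  return __msa_aver_u_b(r0, r4);
}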
/* Description : Load 4x4 block of signed halfword elements from 1D source
Outputs - out0, out1, out2, out3
*/
#define LD4x4_SH(psrc, out0, out1, out2, out3) \
- do { \
+ { \
out0 = LD_SH(psrc); \
out2 = LD_SH(psrc + 8); \
out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
- } while (0)
+ }
/* Description : Store vectors with stride
Arguments : Inputs - in0, in1, pdst, stride
Store 16 byte elements from 'in1' to (pdst + stride)
*/
#define ST_V2(RTYPE, in0, in1, pdst, stride) \
- do { \
+ { \
ST_V(RTYPE, in0, (pdst)); \
ST_V(RTYPE, in1, (pdst) + stride); \
- } while (0)
+ }
#define ST_UB2(...) ST_V2(v16u8, __VA_ARGS__)
#define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
#define ST_SW2(...) ST_V2(v4i32, __VA_ARGS__)
#define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
ST_V2(RTYPE, in0, in1, (pdst), stride); \
ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
- } while (0)
+ }
#define ST_UB4(...) ST_V4(v16u8, __VA_ARGS__)
#define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
#define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
- do { \
+ { \
ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride); \
ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
- } while (0)
+ }
#define ST_UB8(...) ST_V8(v16u8, __VA_ARGS__)
#define ST_SH8(...) ST_V8(v8i16, __VA_ARGS__)
the GP register and stored to (pdst + 3 * stride)
*/
#define ST2x4_UB(in, stidx, pdst, stride) \
- do { \
+ { \
uint16_t out0_m, out1_m, out2_m, out3_m; \
uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \
\
SH(out1_m, pblk_2x4_m + stride); \
SH(out2_m, pblk_2x4_m + 2 * stride); \
SH(out3_m, pblk_2x4_m + 3 * stride); \
- } while (0)
+ }
/* Description : Store 4x2 byte block to destination memory from input vector
Arguments : Inputs - in, pdst, stride
register and stored to (pdst + stride)
*/
#define ST4x2_UB(in, pdst, stride) \
- do { \
+ { \
uint32_t out0_m, out1_m; \
uint8_t *pblk_4x2_m = (uint8_t *)(pdst); \
\
\
SW(out0_m, pblk_4x2_m); \
SW(out1_m, pblk_4x2_m + stride); \
- } while (0)
+ }
/* Description : Store 4x4 byte block to destination memory from input vector
Arguments : Inputs - in0, in1, pdst, stride
GP register and stored to (pdst + 3 * stride)
*/
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
- do { \
+ { \
uint32_t out0_m, out1_m, out2_m, out3_m; \
uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \
\
out3_m = __msa_copy_u_w((v4i32)in1, idx3); \
\
SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \
- } while (0)
+ }
#define ST4x8_UB(in0, in1, pdst, stride) \
- do { \
+ { \
uint8_t *pblk_4x8 = (uint8_t *)(pdst); \
\
ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \
ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
- } while (0)
+ }
/* Description : Store 8x1 byte block to destination memory from input vector
Arguments : Inputs - in, pdst
GP register and stored to (pdst)
*/
#define ST8x1_UB(in, pdst) \
- do { \
+ { \
uint64_t out0_m; \
\
out0_m = __msa_copy_u_d((v2i64)in, 0); \
SD(out0_m, pdst); \
- } while (0)
+ }
/* Description : Store 8x2 byte block to destination memory from input vector
Arguments : Inputs - in, pdst, stride
GP register and stored to (pdst + stride)
*/
#define ST8x2_UB(in, pdst, stride) \
- do { \
+ { \
uint64_t out0_m, out1_m; \
uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \
\
\
SD(out0_m, pblk_8x2_m); \
SD(out1_m, pblk_8x2_m + stride); \
- } while (0)
+ }
/* Description : Store 8x4 byte block to destination memory from input
vectors
GP register and stored to (pdst + 3 * stride)
*/
#define ST8x4_UB(in0, in1, pdst, stride) \
- do { \
+ { \
uint64_t out0_m, out1_m, out2_m, out3_m; \
uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \
\
out3_m = __msa_copy_u_d((v2i64)in1, 1); \
\
SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \
- } while (0)
+ }
/* Description : average with rounding (in0 + in1 + 1) / 2.
Arguments : Inputs - in0, in1, in2, in3,
with rounding is calculated and written to 'out0'
*/
#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \
out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
- } while (0)
+ }
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1); \
AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)
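/* Worked example, per byte lane: __msa_aver_u_b rounds up on ties, so
 *   aver_u_b(200, 101) = (200 + 101 + 1) >> 1 = 151,
 * whereas a truncating (a + b) >> 1 would give 150. This is the rounding
 * that half-pel prediction averages rely on. */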
/* Description : Immediate number of elements to slide with zero
value specified in the 'slide_val'
*/
#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) \
- do { \
+ { \
v16i8 zero_m = { 0 }; \
out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
- } while (0)
+ }
#define SLDI_B2_0_SW(...) SLDI_B2_0(v4i32, __VA_ARGS__)
#define SLDI_B4_0(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, \
slide_val) \
- do { \
+ { \
SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val); \
SLDI_B2_0(RTYPE, in2, in3, out2, out3, slide_val); \
- } while (0)
+ }
#define SLDI_B4_0_UB(...) SLDI_B4_0(v16u8, __VA_ARGS__)
/* Description : Immediate number of elements to slide
value specified in the 'slide_val'
*/
#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \
- do { \
+ { \
out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \
out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \
- } while (0)
+ }
#define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)
#define SLDI_B2_SH(...) SLDI_B2(v8i16, __VA_ARGS__)
#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, out0, out1, \
out2, slide_val) \
- do { \
+ { \
SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val); \
out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \
- } while (0)
+ }
#define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__)
#define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)
'out0' as per control vector 'mask0'
*/
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
- } while (0)
+ }
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3, out0, out1, out2, \
out3) \
- do { \
+ { \
VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1); \
VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3); \
- } while (0)
+ }
#define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__)
#define VSHF_B4_SH(...) VSHF_B4(v8i16, __VA_ARGS__)
are added together and written to the 'out0' vector
*/
#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \
out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \
- } while (0)
+ }
#define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)
#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)
/* Description : Dot product of byte vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \
out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \
- } while (0)
+ }
#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)
/* Description : Dot product of halfword vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \
out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \
- } while (0)
+ }
#define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)
#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
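/* Semantics, per output word i (adjacent halfword lanes are paired):
 *   out[i] = mult[2*i] * cnst[2*i] + mult[2*i+1] * cnst[2*i+1]
 * This is why the transform code interleaves two cosine constants into a
 * single vector (e.g. via __msa_ilvev_h) before calling DOTP_SH2: one dot
 * product then evaluates a*c0 + b*c1 across the whole register. */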
/* Description : Dot product of word vector elements
are added together and written to the 'out0' vector
*/
#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \
out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \
- } while (0)
+ }
#define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
/* Description : Dot product & addition of byte vector elements
are added to the 'out0' vector
*/
#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \
out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \
- } while (0)
+ }
#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)
#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \
cnst3, out0, out1, out2, out3) \
- do { \
+ { \
DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
- } while (0)
+ }
#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)
/* Description : Dot product & addition of halfword vector elements
are added to the 'out0' vector
*/
#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
- } while (0)
+ }
#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
/* Description : Dot product & addition of double word vector elements
are added to the 'out0' vector
*/
#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
- } while (0)
+ }
#define DPADD_SD2_SD(...) DPADD_SD2(v2i64, __VA_ARGS__)
/* Description : Minimum values between unsigned elements of
'min_vec' are written to output vector 'in0'
*/
#define MIN_UH2(RTYPE, in0, in1, min_vec) \
- do { \
+ { \
in0 = (RTYPE)__msa_min_u_h((v8u16)in0, min_vec); \
in1 = (RTYPE)__msa_min_u_h((v8u16)in1, min_vec); \
- } while (0)
+ }
#define MIN_UH2_UH(...) MIN_UH2(v8u16, __VA_ARGS__)
#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) \
- do { \
+ { \
MIN_UH2(RTYPE, in0, in1, min_vec); \
MIN_UH2(RTYPE, in2, in3, min_vec); \
- } while (0)
+ }
#define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__)
/* Description : Clips all signed halfword elements of input vector
out_m; \
})
#define CLIP_SH2_0_255(in0, in1) \
- do { \
+ { \
in0 = CLIP_SH_0_255(in0); \
in1 = CLIP_SH_0_255(in1); \
- } while (0)
+ }
#define CLIP_SH4_0_255(in0, in1, in2, in3) \
- do { \
+ { \
CLIP_SH2_0_255(in0, in1); \
CLIP_SH2_0_255(in2, in3); \
- } while (0)
+ }
/* Description : Horizontal addition of 4 signed word elements of input vector
Arguments : Input - in (signed word vector)
halfword result is written to 'out0'
*/
#define HADD_UB2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \
out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \
- } while (0)
+ }
#define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)
#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
HADD_UB2(RTYPE, in0, in1, out0, out1); \
HADD_UB2(RTYPE, in2, in3, out2, out3); \
- } while (0)
+ }
#define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__)
/* Description : Horizontal subtraction of unsigned byte vector elements
halfword result is written to 'out0'
*/
#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
- } while (0)
+ }
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
/* Description : SAD (Sum of Absolute Difference)
word result is written to 'out0'
*/
#define HSUB_UH2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \
out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \
- } while (0)
+ }
#define HSUB_UH2_SW(...) HSUB_UH2(v4i32, __VA_ARGS__)
/* Description : Set element n of input vector to GPR value
Details : Set element 0 in vector 'out' to value specified in 'in0'
*/
#define INSERT_W2(RTYPE, in0, in1, out) \
- do { \
+ { \
out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
- } while (0)
+ }
#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) \
- do { \
+ { \
out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \
out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \
- } while (0)
+ }
#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
#define INSERT_D2(RTYPE, in0, in1, out) \
- do { \
+ { \
out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
- } while (0)
+ }
#define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
#define INSERT_D2_SH(...) INSERT_D2(v8i16, __VA_ARGS__)
and written to 'out0'
*/
#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
- } while (0)
+ }
#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
and written to 'out0'
*/
#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
- } while (0)
+ }
#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
#define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
and written to 'out0'
*/
#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
- } while (0)
+ }
#define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
/* Description : Interleave even double word elements from vectors
and written to 'out0'
*/
#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \
out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \
- } while (0)
+ }
#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
/* Description : Interleave left half of byte elements from vectors
and written to 'out0'.
*/
#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \
- } while (0)
+ }
#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
#define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)
#define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__)
interleaved and written to 'out0'.
*/
#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
#define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)
and written to 'out0'.
*/
#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \
- } while (0)
+ }
#define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__)
#define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
and written to out0.
*/
#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \
- } while (0)
+ }
#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
#define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \
in11, in12, in13, in14, in15, out0, out1, out2, out3, out4, \
out5, out6, out7) \
- do { \
+ { \
ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3); \
ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
out6, out7); \
- } while (0)
+ }
#define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__)
/* Description : Interleave right half of halfword elements from vectors
interleaved and written to 'out0'.
*/
#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \
- } while (0)
+ }
#define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__)
#define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__)
/* Description : Interleave right half of double word elements from vectors
interleaved and written to 'out0'.
*/
#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \
out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \
- } while (0)
+ }
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
- do { \
+ { \
ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
out2 = (RTYPE)__msa_ilvr_d((v2i64)(in4), (v2i64)(in5)); \
- } while (0)
+ }
#define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__)
#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
interleaved and written to 'out0'
*/
#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
- } while (0)
+ }
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
- } while (0)
+ }
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
- } while (0)
+ }
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SB(...) ILVRL_W2(v16i8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
The results are written in place
*/
#define SAT_UH2(RTYPE, in0, in1, sat_val) \
- do { \
+ { \
in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \
in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \
- } while (0)
+ }
#define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__)
#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \
- do { \
+ { \
SAT_UH2(RTYPE, in0, in1, sat_val); \
SAT_UH2(RTYPE, in2, in3, sat_val); \
- } while (0)
+ }
#define SAT_UH4_UH(...) SAT_UH4(v8u16, __VA_ARGS__)
/* Description : Saturate the halfword element values to the max
The results are written in place
*/
#define SAT_SH2(RTYPE, in0, in1, sat_val) \
- do { \
+ { \
in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \
in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \
- } while (0)
+ }
#define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
- do { \
+ { \
SAT_SH2(RTYPE, in0, in1, sat_val); \
SAT_SH2(RTYPE, in2, in3, sat_val); \
- } while (0)
+ }
#define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
/* Description : Indexed halfword element values are replicated to all
Valid index range for halfword operation is 0-7
*/
#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \
out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \
- } while (0)
+ }
#define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, out0, out1, out2, out3) \
- do { \
+ { \
SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3); \
- } while (0)
+ }
#define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__)
#define SPLATI_H4_SH(...) SPLATI_H4(v8i16, __VA_ARGS__)
half of 'out0'.
*/
#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \
- } while (0)
+ }
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
right half of 'out0'.
*/
#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
/* Description : Pack even double word elements of vector pairs
half of 'out0'.
*/
#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \
out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \
- } while (0)
+ }
#define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
#define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__)
/* Description : Each byte element is logically xor'ed with immediate 128
logically xor'ed with 128 and the result is stored in-place.
*/
#define XORI_B2_128(RTYPE, in0, in1) \
- do { \
+ { \
in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \
in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \
- } while (0)
+ }
#define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
#define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
#define XORI_B3_128(RTYPE, in0, in1, in2) \
- do { \
+ { \
XORI_B2_128(RTYPE, in0, in1); \
in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \
- } while (0)
+ }
#define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
- do { \
+ { \
XORI_B2_128(RTYPE, in0, in1); \
XORI_B2_128(RTYPE, in2, in3); \
- } while (0)
+ }
#define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
#define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \
- do { \
+ { \
XORI_B4_128(RTYPE, in0, in1, in2, in3); \
XORI_B3_128(RTYPE, in4, in5, in6); \
- } while (0)
+ }
#define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)
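/* XOR with 128 flips each byte's MSB, mapping unsigned pixels [0, 255]
 * onto a signed representation [-128, 127] and back:
 *   200 ^ 128 = 72, and signed 72 stands for 200 - 128.
 * The filter kernels can then use signed saturating arithmetic such as
 * __msa_adds_s_b, and a second xori with 0x80 (as in the loop-filter
 * macros earlier) restores the unsigned pixel range. */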
/* Description : Average of signed halfword elements -> (a + b) / 2
*/
#define AVE_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
out0 = (RTYPE)__msa_ave_s_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ave_s_h((v8i16)in2, (v8i16)in3); \
out2 = (RTYPE)__msa_ave_s_h((v8i16)in4, (v8i16)in5); \
out3 = (RTYPE)__msa_ave_s_h((v8i16)in6, (v8i16)in7); \
- } while (0)
+ }
#define AVE_SH4_SH(...) AVE_SH4(v8i16, __VA_ARGS__)
/* Description : Addition of signed halfword elements and signed saturation
between halfword data type range
*/
#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = (RTYPE)__msa_adds_s_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_adds_s_h((v8i16)in2, (v8i16)in3); \
- } while (0)
+ }
#define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__)
#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3) \
- do { \
+ { \
ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \
ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
#define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)
/* Description : Shift left all elements of vector (generic for all data types)
the result is written in-place.
*/
#define SLLI_4V(in0, in1, in2, in3, shift) \
- do { \
+ { \
in0 = in0 << shift; \
in1 = in1 << shift; \
in2 = in2 << shift; \
in3 = in3 << shift; \
- } while (0)
+ }
/* Description : Arithmetic shift right all elements of vector
(generic for all data types)
the result is written in-place. 'shift' is a GP variable.
*/
#define SRA_2V(in0, in1, shift) \
- do { \
+ { \
in0 = in0 >> shift; \
in1 = in1 >> shift; \
- } while (0)
+ }
#define SRA_4V(in0, in1, in2, in3, shift) \
- do { \
+ { \
in0 = in0 >> shift; \
in1 = in1 >> shift; \
in2 = in2 >> shift; \
in3 = in3 >> shift; \
- } while (0)
+ }
/* Description : Shift right arithmetic rounded words
Arguments : Inputs - in0, in1, shift
'shift' is a vector.
*/
#define SRAR_W2(RTYPE, in0, in1, shift) \
- do { \
+ { \
in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \
in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \
- } while (0)
+ }
#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
- do { \
+ { \
SRAR_W2(RTYPE, in0, in1, shift); \
SRAR_W2(RTYPE, in2, in3, shift); \
- } while (0)
+ }
#define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
/* Description : Shift right arithmetic rounded (immediate)
'shift' is an immediate value.
*/
#define SRARI_H2(RTYPE, in0, in1, shift) \
- do { \
+ { \
in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \
in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \
- } while (0)
+ }
#define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
#define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
- do { \
+ { \
SRARI_H2(RTYPE, in0, in1, shift); \
SRARI_H2(RTYPE, in2, in3, shift); \
- } while (0)
+ }
#define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
#define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
#define SRARI_W2(RTYPE, in0, in1, shift) \
- do { \
+ { \
in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \
in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \
- } while (0)
+ }
#define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)
#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
- do { \
+ { \
SRARI_W2(RTYPE, in0, in1, shift); \
SRARI_W2(RTYPE, in2, in3, shift); \
- } while (0)
+ }
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
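/* srari is "shift right arithmetic rounded immediate": each element becomes
 *   (in + (1 << (shift - 1))) >> shift,
 * i.e. round-to-nearest instead of truncation. With DCT_CONST_BITS == 14,
 * __msa_srari_w(x, DCT_CONST_BITS) computes (x + 8192) >> 14, the descaling
 * step that follows every fixed-point multiply by a cospi constant. */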
/* Description : Logical shift right all elements of vector (immediate)
the result is written in-place. 'shift' is an immediate value.
*/
#define SRLI_H4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, shift) \
- do { \
+ { \
out0 = (RTYPE)__msa_srli_h((v8i16)in0, shift); \
out1 = (RTYPE)__msa_srli_h((v8i16)in1, shift); \
out2 = (RTYPE)__msa_srli_h((v8i16)in2, shift); \
out3 = (RTYPE)__msa_srli_h((v8i16)in3, shift); \
- } while (0)
+ }
#define SRLI_H4_SH(...) SRLI_H4(v8i16, __VA_ARGS__)
/* Description : Multiplication of pairs of vectors
and the result is written to 'out0'
*/
#define MUL2(in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = in0 * in1; \
out1 = in2 * in3; \
- } while (0)
+ }
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
- do { \
+ { \
MUL2(in0, in1, in2, in3, out0, out1); \
MUL2(in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
/* Description : Addition of 2 pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
to 'out0'.
*/
#define ADD2(in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = in0 + in1; \
out1 = in2 + in3; \
- } while (0)
+ }
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
- do { \
+ { \
ADD2(in0, in1, in2, in3, out0, out1); \
ADD2(in4, in5, in6, in7, out2, out3); \
- } while (0)
+ }
/* Description : Subtraction of 2 pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
written to 'out0'.
*/
#define SUB2(in0, in1, in2, in3, out0, out1) \
- do { \
+ { \
out0 = in0 - in1; \
out1 = in2 - in3; \
- } while (0)
+ }
#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
- do { \
+ { \
out0 = in0 - in1; \
out1 = in2 - in3; \
out2 = in4 - in5; \
out3 = in6 - in7; \
- } while (0)
+ }
/* Description : Sign extend halfword elements from right half of the vector
Arguments : Input - in (halfword vector)
4 word elements keeping sign intact
*/
#define UNPCK_R_SH_SW(in, out) \
- do { \
+ { \
v8i16 sign_m; \
\
sign_m = __msa_clti_s_h((v8i16)in, 0); \
out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \
- } while (0)
+ }
/* Description : Sign extend byte elements from input vector and return
halfword results in pair of vectors
generate 8 signed halfword elements in 'out1'
*/
#define UNPCK_SB_SH(in, out0, out1) \
- do { \
+ { \
v16i8 tmp_m; \
\
tmp_m = __msa_clti_s_b((v16i8)in, 0); \
ILVRL_B2_SH(tmp_m, in, out0, out1); \
- } while (0)
+ }
/* Description : Zero extend unsigned byte elements to halfword elements
Arguments : Input - in (unsigned byte vector)
Zero extended left half of vector is returned in 'out1'
*/
#define UNPCK_UB_SH(in, out0, out1) \
- do { \
+ { \
v16i8 zero_m = { 0 }; \
\
ILVRL_B2_SH(zero_m, in, out0, out1); \
- } while (0)
+ }
/* Description : Sign extend halfword elements from input vector and return
the result in pair of vectors
generate 4 signed word elements in 'out1'
*/
#define UNPCK_SH_SW(in, out0, out1) \
- do { \
+ { \
v8i16 tmp_m; \
\
tmp_m = __msa_clti_s_h((v8i16)in, 0); \
ILVRL_H2_SW(tmp_m, in, out0, out1); \
- } while (0)
+ }
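/* How the widening works: __msa_clti_s_h(in, 0) returns all-ones (0xFFFF)
 * in lanes where 'in' is negative and zero elsewhere, so interleaving that
 * mask above each halfword reproduces two's-complement sign extension:
 *   in = -3 (0xFFFD) -> mask 0xFFFF -> word 0xFFFFFFFD = -3
 *   in =  5 (0x0005) -> mask 0x0000 -> word 0x00000005 =  5 */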
/* Description : Butterfly of 4 input vectors
Arguments : Inputs - in0, in1, in2, in3
Details : Butterfly operation
*/
#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
out0 = in0 + in3; \
out1 = in1 + in2; \
\
out2 = in1 - in2; \
out3 = in0 - in3; \
- } while (0)
+ }
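/* The butterfly is the first split of the DCT: sums feed the even-frequency
 * half, differences feed the odd half. For scalar inputs (1, 2, 3, 4):
 *   out0 = 1 + 4 = 5, out1 = 2 + 3 = 5, out2 = 2 - 3 = -1, out3 = 1 - 4 = -3 */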
/* Description : Butterfly of 8 input vectors
Arguments : Inputs - in0 ... in7
*/
#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
out3, out4, out5, out6, out7) \
- do { \
+ { \
out0 = in0 + in7; \
out1 = in1 + in6; \
out2 = in2 + in5; \
out5 = in2 - in5; \
out6 = in1 - in6; \
out7 = in0 - in7; \
- } while (0)
+ }
/* Description : Butterfly of 16 input vectors
Arguments : Inputs - in0 ... in15
in11, in12, in13, in14, in15, out0, out1, out2, out3, \
out4, out5, out6, out7, out8, out9, out10, out11, out12, \
out13, out14, out15) \
- do { \
+ { \
out0 = in0 + in15; \
out1 = in1 + in14; \
out2 = in2 + in13; \
out13 = in2 - in13; \
out14 = in1 - in14; \
out15 = in0 - in15; \
- } while (0)
+ }
/* Description : Transpose input 8x8 byte block
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
*/
#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \
out1, out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
\
ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \
SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \
SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \
- } while (0)
+ }
#define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
/* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
in10, in11, in12, in13, in14, in15, out0, out1, \
out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
\
tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \
out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \
- } while (0)
+ }
/* Description : Transpose 4x4 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3
Return Type - signed halfword
*/
#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 s0_m, s1_m; \
\
ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \
- } while (0)
+ }
/* Description : Transpose 4x8 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
*/
#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n; \
v8i16 zero_m = { 0 }; \
out5 = zero_m; \
out6 = zero_m; \
out7 = zero_m; \
- } while (0)
+ }
/* Description : Transpose 8x4 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
Return Type - signed halfword
*/
#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
\
ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \
ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \
ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \
ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \
- } while (0)
+ }
/* Description : Transpose 8x8 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
*/
#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \
out1, out2, out3, out4, out5, out6, out7) \
- do { \
+ { \
v8i16 s0_m, s1_m; \
v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
out3 = (RTYPE)__msa_pckod_d((v2i64)tmp1_m, (v2i64)tmp5_m); \
out5 = (RTYPE)__msa_pckod_d((v2i64)tmp2_m, (v2i64)tmp6_m); \
out7 = (RTYPE)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m); \
- } while (0)
+ }
#define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)
/* Description : Transpose 4x4 block with word elements in vectors
Return Type - signed word
*/
#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
- do { \
+ { \
v4i32 s0_m, s1_m, s2_m, s3_m; \
\
ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \
out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \
out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \
- } while (0)
+ }
/* Description : Add block 4x4
Arguments : Inputs - in0, in1, in2, in3, pdst, stride
the destination bytes, clipped between 0-255 and stored.
*/
#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
- do { \
+ { \
uint32_t src0_m, src1_m, src2_m, src3_m; \
v8i16 inp0_m, inp1_m, res0_m, res1_m; \
v16i8 dst0_m = { 0 }; \
v16i8 dst1_m = { 0 }; \
v16i8 zero_m = { 0 }; \
\
ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m); \
LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \
INSERT_W2_SB(src0_m, src1_m, dst0_m); \
INSERT_W2_SB(src2_m, src3_m, dst1_m); \
CLIP_SH2_0_255(res0_m, res1_m); \
PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride); \
- } while (0)
+ }
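/* Usage sketch (hypothetical caller): the tail of a 4x4 inverse transform,
 * where the four v8i16 rows hold the residual; the macro adds them to the
 * predictor already at 'dst', clips to [0, 255] and stores the bytes. */
static void idct4x4_store_example(v8i16 res0, v8i16 res1, v8i16 res2,
                                  v8i16 res3, uint8_t *dst, int32_t stride) {
  ADDBLK_ST4x4_UB(res0, res1, res2, res3, dst, stride);
}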
/* Description : Pack even elements of input vectors & xor with 128
Arguments : Inputs - in0, in1
Arguments : Inputs - in0, in1, in2, in3, dst0, dst1, pdst, stride
*/
#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, dst0, dst1, pdst, stride) \
- do { \
+ { \
v16u8 tmp0_m, tmp1_m; \
uint8_t *pdst_m = (uint8_t *)(pdst); \
\
tmp1_m = PCKEV_XORI128_UB(in2, in3); \
AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m); \
ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \
- } while (0)
+ }
/* Description : Pack even byte elements and store byte vector in destination
memory
Arguments : Inputs - in0, in1, pdst
*/
#define PCKEV_ST_SB(in0, in1, pdst) \
- do { \
+ { \
v16i8 tmp_m; \
\
tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \
ST_SB(tmp_m, (pdst)); \
- } while (0)
+ }
/* Description : Horizontal 2 tap filter kernel code
Arguments : Inputs - in0, in1, mask, coeff, shift
#include "vpx_dsp/mips/macros_msa.h"
#define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out) \
- do { \
+ { \
out = (RTYPE)__msa_insve_w((v4i32)out, 0, (v4i32)in0); \
out = (RTYPE)__msa_insve_w((v4i32)out, 1, (v4i32)in1); \
out = (RTYPE)__msa_insve_w((v4i32)out, 2, (v4i32)in2); \
out = (RTYPE)__msa_insve_w((v4i32)out, 3, (v4i32)in3); \
- } while (0)
+ }
#define SAD_INSVE_W4_UB(...) SAD_INSVE_W4(v16u8, __VA_ARGS__)
static uint32_t sad_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
};
#define CALC_MSE_AVG_B(src, ref, var, sub) \
- do { \
+ { \
v16u8 src_l0_m, src_l1_m; \
v8i16 res_l0_m, res_l1_m; \
\
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
\
(sub) += res_l0_m + res_l1_m; \
- } while (0)
+ }
#define VARIANCE_WxH(sse, diff, shift) \
(sse) - (((uint32_t)(diff) * (diff)) >> (shift))
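/* Single-pass variance identity: with N = w * h = 1 << shift,
 *   variance = sse - sum * sum / N
 * which the macro evaluates as sse - ((diff * diff) >> shift), using the
 * accumulated squared differences ('sse') and the accumulated signed
 * differences ('diff') produced by CALC_MSE_AVG_B above. */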
#include "vpx_dsp/mips/macros_msa.h"
#define DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) \
- do { \
+ { \
v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m; \
v8i16 k0_m, k1_m, k2_m, zero = { 0 }; \
\
DOTP_SH2_SW(s3_m, s2_m, k2_m, k2_m, s1_m, s0_m); \
SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS); \
out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
- } while (0)
+ }
#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, dst0, \
dst1, dst2, dst3) \
- do { \
+ { \
v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \
v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \
\
SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS); \
PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, dst0, \
dst1, dst2, dst3); \
- } while (0)
+ }
#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) \
({ \
})
#define MADD_SHORT(m0, m1, c0, c1, res0, res1) \
- do { \
+ { \
v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \
v8i16 madd_s0_m, madd_s1_m; \
\
madd0_m, madd1_m, madd2_m, madd3_m); \
SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS); \
PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1); \
- } while (0)
+ }
#define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \
out2, out3) \
- do { \
+ { \
v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m; \
\
BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, m4_m, m5_m, tmp3_m, tmp2_m); \
SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \
PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \
- } while (0)
+ }
#endif // VPX_VPX_DSP_MIPS_TXFM_MACROS_MSA_H_
#include "vpx_dsp/mips/macros_msa.h"
#define CALC_MSE_B(src, ref, var) \
- do { \
+ { \
v16u8 src_l0_m, src_l1_m; \
v8i16 res_l0_m, res_l1_m; \
\
ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \
HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
- } while (0)
+ }
#define CALC_MSE_AVG_B(src, ref, var, sub) \
- do { \
+ { \
v16u8 src_l0_m, src_l1_m; \
v8i16 res_l0_m, res_l1_m; \
\
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
\
sub += res_l0_m + res_l1_m; \
- } while (0)
+ }
#define VARIANCE_WxH(sse, diff, shift) \
(sse) - (((uint32_t)(diff) * (diff)) >> (shift))
#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
mask2, mask3, filt0, filt1, filt2, filt3, \
out0, out1) \
- do { \
+ { \
v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
v8i16 res0_m, res1_m, res2_m, res3_m; \
\
VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \
DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m); \
ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1); \
- } while (0)
+ }
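/* The 8-tap kernel is evaluated as byte dot products: each VSHF_B2 gathers
 * the source pixels for one pair of taps according to its mask, each
 * DPADD_SB2_SH multiplies them by the matching filter-coefficient pair and
 * accumulates, and the final saturating add merges the partial sums into
 * the two output rows. */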
#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \
mask2, mask3, filt0, filt1, filt2, filt3, \
out0, out1, out2, out3) \
- do { \
+ { \
v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m; \
\
res4_m, res5_m, res6_m, res7_m); \
ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m, \
res7_m, out0, out1, out2, out3); \
- } while (0)
+ }
#define PCKEV_XORI128_AVG_ST_UB(in0, in1, dst, pdst) \
- do { \
+ { \
v16u8 tmp_m; \
\
tmp_m = PCKEV_XORI128_UB(in1, in0); \
tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \
ST_UB(tmp_m, (pdst)); \
- } while (0)
+ }
#define PCKEV_AVG_ST_UB(in0, in1, dst, pdst) \
- do { \
+ { \
v16u8 tmp_m; \
\
tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \
ST_UB(tmp_m, (pdst)); \
- } while (0)
+ }
#define PCKEV_AVG_ST8x4_UB(in0, in1, in2, in3, dst0, dst1, pdst, stride) \
- do { \
+ { \
v16u8 tmp0_m, tmp1_m; \
uint8_t *pdst_m = (uint8_t *)(pdst); \
\
PCKEV_B2_UB(in1, in0, in3, in2, tmp0_m, tmp1_m); \
AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m); \
ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \
- } while (0)
+ }
#endif // VPX_VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_