DEF_HELPER_1(neon_narrow_u8, i32, i64)
DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
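+/* Narrow each signed 16-bit element of x to an unsigned 8-bit result
+ * with saturation, setting the QC flag if any element saturates. */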
+uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
+{
+    uint16_t s;
+    uint8_t d;
+    uint32_t res = 0;
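+    /* Saturate the signed 16-bit element at bit offset n of x to an
+     * unsigned 8-bit value at offset n/2 of res; a negative input
+     * saturates to 0, and any saturation sets QC. */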
+#define SAT8(n) \
+    s = x >> n; \
+    if (s & 0x8000) { \
+        SET_QC(); \
+    } else { \
+        if (s > 0xff) { \
+            d = 0xff; \
+            SET_QC(); \
+        } else { \
+            d = s; \
+        } \
+        res |= (uint32_t)d << (n / 2); \
+    }
+
+    SAT8(0);
+    SAT8(16);
+    SAT8(32);
+    SAT8(48);
+#undef SAT8
+    return res;
+}
+
uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    return res;
}
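+/* Narrow each signed 32-bit element of x to an unsigned 16-bit result
+ * with saturation, setting the QC flag if an element saturates. */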
+uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
+{
+    uint32_t high;
+    uint32_t low;
+    low = x;
+    if (low & 0x80000000) {
+        low = 0;
+        SET_QC();
+    } else if (low > 0xffff) {
+        low = 0xffff;
+        SET_QC();
+    }
+    high = x >> 32;
+    if (high & 0x80000000) {
+        high = 0;
+        SET_QC();
+    } else if (high > 0xffff) {
+        high = 0xffff;
+        SET_QC();
+    }
+    return low | (high << 16);
+}
+
uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    return (uint16_t)low | (high << 16);
}
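+/* Narrow the signed 64-bit value x to an unsigned 32-bit result with
+ * saturation, setting the QC flag if it saturates. */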
+uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
+{
+    if (x & 0x8000000000000000ull) {
+        SET_QC();
+        return 0;
+    }
+    if (x > 0xffffffffu) {
+        SET_QC();
+        return 0xffffffffu;
+    }
+    return x;
+}
+
uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
    }
}
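+/* Emit the signed-to-unsigned saturating narrow helper call for the
+ * given element size (used by VQMOVUN below). */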
+static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
+{
+    switch (size) {
+    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
+    default: abort();
+    }
+}
+
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
for (pass = 0; pass < 2; pass++) {
    neon_load_reg64(cpu_V0, rm + pass);
    tmp = new_tmp();
-   if (op == 36 && q == 0) {
-       gen_neon_narrow(size, tmp, cpu_V0);
-   } else if (q) {
-       gen_neon_narrow_satu(size, tmp, cpu_V0);
-   } else {
-       gen_neon_narrow_sats(size, tmp, cpu_V0);
+   if (op == 36) {
+       if (q) { /* VQMOVUN */
+           gen_neon_unarrow_sats(size, tmp, cpu_V0);
+       } else { /* VMOVN */
+           gen_neon_narrow(size, tmp, cpu_V0);
+       }
+   } else { /* VQMOVN */
+       if (q) {
+           gen_neon_narrow_satu(size, tmp, cpu_V0);
+       } else {
+           gen_neon_narrow_sats(size, tmp, cpu_V0);
+       }
    }
    if (pass == 0) {
        tmp2 = tmp;