/*
 * S/390 condition code helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
23 #include "qemu/host-utils.h"
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
/* Signed 32-bit compare: cc 0 if src == dst, 1 if src < dst, 2 if src > dst. */
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}
/* Signed 32-bit compare against zero (load-and-test style). */
static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    return cc_calc_ltgt_32(dst, 0);
}
/* Signed 64-bit compare: cc 0 if src == dst, 1 if src < dst, 2 if src > dst. */
static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}
/* Signed 64-bit compare against zero (load-and-test style). */
static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    return cc_calc_ltgt_64(dst, 0);
}
/* Unsigned 32-bit compare: cc 0 if equal, 1 if src < dst, 2 if src > dst. */
static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}
/* Unsigned 64-bit compare: cc 0 if equal, 1 if src < dst, 2 if src > dst. */
static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}
/* TEST UNDER MASK (32-bit): cc 0 if all selected bits zero,
   3 if all selected bits one, 1 if mixed. */
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}
/* TEST UNDER MASK (64-bit): cc 0 if all selected bits zero, 3 if all one;
   otherwise cc 2 when the leftmost selected bit of val is one, else cc 1.
   clz64() locates the leftmost selected bit via the mask. */
static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 2;
        } else {
            return 1;
        }
    }
}
117 static uint32_t cc_calc_nz(uint64_t dst)
/* Signed 64-bit add: cc 3 on overflow, else 0/1/2 for zero/negative/positive
   result.  ar is the precomputed sum a1 + a2. */
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}
/* Unsigned 64-bit add: bit 0 of cc is "result nonzero", bit 1 is carry-out
   (detected by unsigned wrap: ar < a1).  a2 is unused but kept for the
   uniform (a1, a2, ar) helper signature. */
static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    return (ar != 0) + 2 * (ar < a1);
}
/* Unsigned 64-bit add-with-carry: ar = a1 + a2 + carry_in.
   cc bit 0 = result nonzero, bit 1 = carry-out. */
static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* Recover a2 + carry_in. */
    uint64_t a2c = ar - a1;
    /* Check for a2+carry_in overflow, then a1+a2c overflow. */
    int carry_out = (a2c < a2) || (ar < a1);

    return (ar != 0) + 2 * carry_out;
}
/* Signed 64-bit subtract: cc 3 on overflow, else 0/1/2 for zero/negative/
   positive result.  ar is the precomputed difference a1 - a2. */
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}
167 static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
/* Unsigned 64-bit subtract-with-borrow: ar = a1 - a2 - borrow_in.
   cc bit 0 = result nonzero, bit 1 = NO borrow-out (s390 carry sense). */
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* We had borrow-in if normal subtraction isn't equal. */
    int borrow_in = ar - (a1 - a2);
    int borrow_out;

    /* If a2 was ULONG_MAX, and borrow_in, then a2 is logically 65 bits,
       and we must have had borrow out. */
    if (borrow_in && a2 == (uint64_t)-1) {
        borrow_out = 1;
    } else {
        /* Fold the borrow into a2 (borrow_in is 0 or -1), then compare. */
        a2 -= borrow_in;
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}
/* LOAD POSITIVE (64-bit): cc 3 if INT64_MIN (absolute value overflows),
   1 for any other nonzero result, 0 for zero. */
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}
209 static uint32_t cc_calc_nabs_64(int64_t dst)
/* LOAD COMPLEMENT (64-bit): cc 3 if INT64_MIN (negation overflows),
   else 0/1/2 for zero/negative/positive result. */
static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}
/* Signed 32-bit add: cc 3 on overflow, else 0/1/2 for zero/negative/positive
   result.  ar is the precomputed sum a1 + a2. */
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}
/* Unsigned 32-bit add: bit 0 of cc is "result nonzero", bit 1 is carry-out
   (unsigned wrap: ar < a1).  a2 is unused but kept for signature uniformity. */
static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    return (ar != 0) + 2 * (ar < a1);
}
/* Unsigned 32-bit add-with-carry: ar = a1 + a2 + carry_in.
   cc bit 0 = result nonzero, bit 1 = carry-out. */
static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* Recover a2 + carry_in. */
    uint32_t a2c = ar - a1;
    /* Check for a2+carry_in overflow, then a1+a2c overflow. */
    int carry_out = (a2c < a2) || (ar < a1);

    return (ar != 0) + 2 * carry_out;
}
/* Signed 32-bit subtract: cc 3 on overflow, else 0/1/2 for zero/negative/
   positive result.  ar is the precomputed difference a1 - a2. */
static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}
273 static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
/* Unsigned 32-bit subtract-with-borrow: ar = a1 - a2 - borrow_in.
   cc bit 0 = result nonzero, bit 1 = NO borrow-out (s390 carry sense). */
static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* We had borrow-in if normal subtraction isn't equal. */
    int borrow_in = ar - (a1 - a2);
    int borrow_out;

    /* If a2 was UINT_MAX, and borrow_in, then a2 is logically 65 bits,
       and we must have had borrow out. */
    if (borrow_in && a2 == (uint32_t)-1) {
        borrow_out = 1;
    } else {
        /* Fold the borrow into a2 (borrow_in is 0 or -1), then compare. */
        a2 -= borrow_in;
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}
/* LOAD POSITIVE (32-bit): cc 3 if INT32_MIN (absolute value overflows),
   1 for any other nonzero result, 0 for zero. */
static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}
315 static uint32_t cc_calc_nabs_32(int32_t dst)
/* LOAD COMPLEMENT (32-bit): cc 3 if INT32_MIN (negation overflows),
   else 0/1/2 for zero/negative/positive result. */
static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}
/* calculate condition code for insert character under mask insn */
/* Full mask (0xf): cc by sign/zero of the inserted word.  Partial mask:
   cc 0 if no byte inserted or all inserted bits zero, else cc 1/2 by the
   leftmost bit of the leftmost inserted byte. */
static uint32_t cc_calc_icm_32(uint32_t mask, uint32_t val)
{
    uint32_t cc;

    HELPER_LOG("%s: mask 0x%x val %d\n", __func__, mask, val);
    if (mask == 0xf) {
        if (!val) {
            return 0;
        } else if (val & 0x80000000) {
            return 1;
        } else {
            return 2;
        }
    }

    if (!val || !mask) {
        cc = 0;
    } else {
        /* Walk down to the lowest set mask bit, shifting val so that the
           leftmost inserted byte ends up in the low byte. */
        while (mask != 1) {
            mask >>= 1;
            val >>= 8;
        }
        if (val & 0x80) {
            cc = 1;
        } else {
            cc = 2;
        }
    }
    return cc;
}
/* SHIFT LEFT SINGLE (64-bit, arithmetic): cc 3 if any bit shifted out of the
   top `shift` positions differs from the sign bit (overflow); otherwise cc
   0/1/2 for zero/negative/positive shifted result with the sign preserved.
   Assumes 0 < shift < 64 (shift of 0 or >= 64 would be UB here). */
static uint32_t cc_calc_slag(uint64_t src, uint64_t shift)
{
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t match, r;

    /* check if the sign bit stays the same */
    if (src & (1ULL << 63)) {
        match = mask;
    } else {
        match = 0;
    }

    if ((src & mask) != match) {
        /* overflow */
        return 3;
    }

    r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));

    if ((int64_t)r == 0) {
        return 0;
    } else if ((int64_t)r < 0) {
        return 1;
    }

    return 2;
}
394 static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
395 uint64_t src, uint64_t dst, uint64_t vr)
404 /* cc_op value _is_ cc */
408 r = cc_calc_ltgt0_32(dst);
411 r = cc_calc_ltgt0_64(dst);
414 r = cc_calc_ltgt_32(src, dst);
417 r = cc_calc_ltgt_64(src, dst);
419 case CC_OP_LTUGTU_32:
420 r = cc_calc_ltugtu_32(src, dst);
422 case CC_OP_LTUGTU_64:
423 r = cc_calc_ltugtu_64(src, dst);
426 r = cc_calc_tm_32(src, dst);
429 r = cc_calc_tm_64(src, dst);
435 r = cc_calc_add_64(src, dst, vr);
438 r = cc_calc_addu_64(src, dst, vr);
441 r = cc_calc_addc_64(src, dst, vr);
444 r = cc_calc_sub_64(src, dst, vr);
447 r = cc_calc_subu_64(src, dst, vr);
450 r = cc_calc_subb_64(src, dst, vr);
453 r = cc_calc_abs_64(dst);
456 r = cc_calc_nabs_64(dst);
459 r = cc_calc_comp_64(dst);
463 r = cc_calc_add_32(src, dst, vr);
466 r = cc_calc_addu_32(src, dst, vr);
469 r = cc_calc_addc_32(src, dst, vr);
472 r = cc_calc_sub_32(src, dst, vr);
475 r = cc_calc_subu_32(src, dst, vr);
478 r = cc_calc_subb_32(src, dst, vr);
481 r = cc_calc_abs_32(dst);
484 r = cc_calc_nabs_32(dst);
487 r = cc_calc_comp_32(dst);
491 r = cc_calc_icm_32(src, dst);
494 r = cc_calc_slag(src, dst);
498 r = set_cc_f32(env, src, dst);
501 r = set_cc_f64(env, src, dst);
504 r = set_cc_nz_f32(dst);
507 r = set_cc_nz_f64(dst);
511 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
514 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
515 cc_name(cc_op), src, dst, vr, r);
519 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
522 return do_calc_cc(env, cc_op, src, dst, vr);
525 uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
526 uint64_t dst, uint64_t vr)
528 return do_calc_cc(env, cc_op, src, dst, vr);
531 /* insert psw mask and condition code into r1 */
532 void HELPER(ipm)(CPUS390XState *env, uint32_t cc, uint32_t r1)
534 uint64_t r = env->regs[r1];
536 r &= 0xffffffff00ffffffULL;
537 r |= (cc << 28) | ((env->psw.mask >> 40) & 0xf);
539 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __func__,
540 cc, env->psw.mask, r);
543 #ifndef CONFIG_USER_ONLY
544 void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
546 load_psw(env, mask, addr);
550 void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
552 HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);
554 switch (a1 & 0xf00) {
556 env->psw.mask &= ~PSW_MASK_ASC;
557 env->psw.mask |= PSW_ASC_PRIMARY;
560 env->psw.mask &= ~PSW_MASK_ASC;
561 env->psw.mask |= PSW_ASC_SECONDARY;
564 env->psw.mask &= ~PSW_MASK_ASC;
565 env->psw.mask |= PSW_ASC_HOME;
568 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
569 program_interrupt(env, PGM_SPECIFICATION, 2);