2 * S/390 memory access helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
24 /*****************************************************************************/
26 #if !defined(CONFIG_USER_ONLY)
27 #include "exec/softmmu_exec.h"
29 #define MMUSUFFIX _mmu
32 #include "exec/softmmu_template.h"
35 #include "exec/softmmu_template.h"
38 #include "exec/softmmu_template.h"
41 #include "exec/softmmu_template.h"
43 /* try to fill the TLB and return an exception if error. If retaddr is
44    NULL, it means that the function was called in C code (i.e. not
45    from generated code or from helper.c) */
46 /* XXX: fix it to restore all registers */
/* Softmmu TLB-miss hook: ask the s390x MMU to resolve the access; on
   failure, restore translation state (when called from generated code)
   and raise the pending program exception.
   NOTE(review): this extract elides interior lines (local declarations,
   the exception-raise path, closing braces) -- verify against the full
   file before editing. */
47 void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
52     ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
53     if (unlikely(ret != 0)) {
54         if (likely(retaddr)) {
55             /* now we have a real cpu fault */
56             cpu_restore_state(env, retaddr);
64 /* #define DEBUG_HELPER */
66 #define HELPER_LOG(x...) qemu_log(x)
68 #define HELPER_LOG(x...)
71 #ifndef CONFIG_USER_ONLY
/* Fast path for memset-like MVC/XC: translate the guest virtual 'dest'
   once, map the physical page, and memset 'byte' over it.  Callers only
   use this when the operand stays within one target page.
   NOTE(review): declarations and the setup of 'len' are elided in this
   extract -- verify against the complete file. */
72 static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
78     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
81     if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
/* translation failed: redo the access as a normal store so the proper
   fault is delivered; the store longjmps and never returns here */
82         cpu_stb_data(env, dest, byte);
83         cpu_abort(env, "should never reach here");
85     dest_phys |= dest & ~TARGET_PAGE_MASK;
87     dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
89     memset(dest_p, byte, len);
91     cpu_physical_memory_unmap(dest_p, 1, len, len);
/* Fast path for MVC/MVPG copies: translate both operands (dest for
   write, src for read), map both physical ranges, memmove, unmap.
   NOTE(review): declarations and 'len' setup are elided in this
   extract; both translations reuse the same 'len' -- verify against
   the complete file. */
94 static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
102     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
105     if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
/* translation failed: take the fault via a normal access; never returns */
106         cpu_stb_data(env, dest, 0);
107         cpu_abort(env, "should never reach here");
109     dest_phys |= dest & ~TARGET_PAGE_MASK;
111     if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
112         cpu_ldub_data(env, src);
113         cpu_abort(env, "should never reach here");
115     src_phys |= src & ~TARGET_PAGE_MASK;
117     dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
118     src_p = cpu_physical_memory_map(src_phys, &len, 0);
120     memmove(dest_p, src_p, len);
122     cpu_physical_memory_unmap(dest_p, 1, len, len);
123     cpu_physical_memory_unmap(src_p, 0, len, len);
/* NC: byte-wise AND of the src operand into dest over l+1 bytes
   (the encoded length 'l' is length-1, per the instruction format).
   Returns the condition code; the cc computation is elided in this
   extract (presumably 0 if all result bytes were zero, else 1 --
   verify against the full file). */
128 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
135     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
136     __func__, l, dest, src);
137     for (i = 0; i <= l; i++) {
138         x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
142         cpu_stb_data(env, dest + i, x);
/* XC: byte-wise XOR of src into dest over l+1 bytes, returning the
   condition code (cc computation elided in this extract).  Has a fast
   path for the common "XC x,x" zeroing idiom when the operand does not
   cross a page boundary. */
148 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
155     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
156     __func__, l, dest, src);
158 #ifndef CONFIG_USER_ONLY
159     /* xor with itself is the same as memset(0) */
160     if ((l > 32) && (src == dest) &&
161     (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
162         mvc_fast_memset(env, l + 1, dest, 0);
/* user-only build: guest memory is directly addressable via g2h() */
167         memset(g2h(dest), 0, l + 1);
/* slow generic path: per-byte load/xor/store */
172     for (i = 0; i <= l; i++) {
173         x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
177         cpu_stb_data(env, dest + i, x);
/* OC: byte-wise OR of src into dest over l+1 bytes; returns the
   condition code (cc computation elided in this extract -- presumably
   mirrors NC/XC; verify against the full file). */
183 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
190     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
191     __func__, l, dest, src);
192     for (i = 0; i <= l; i++) {
193         x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
197         cpu_stb_data(env, dest + i, x);
/* MVC: move l+1 bytes from src to dest.  Note dest == src+1 is the
   classic MVC idiom for filling memory with the byte at src, so that
   case is handled as a memset.  Fast paths apply only when neither
   operand crosses a page boundary; otherwise fall through to 8-byte
   then per-byte copies (the per-byte tail also provides the
   architecturally required left-to-right overlap behaviour). */
203 void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
207     uint32_t l_64 = (l + 1) / 8;
209     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
210     __func__, l, dest, src);
212 #ifndef CONFIG_USER_ONLY
214     (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
215     (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
216         if (dest == (src + 1)) {
217             mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
219         } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
220             mvc_fast_memmove(env, l + 1, dest, src);
/* user-only build: operate on host memory directly */
225     if (dest == (src + 1)) {
226         memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
229         memmove(g2h(dest), g2h(src), l + 1);
234     /* handle the parts that fit into 8-byte loads/stores */
235     if (dest != (src + 1)) {
236         for (i = 0; i < l_64; i++) {
237             cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
242     /* slow version crossing pages with byte accesses */
243     for (i = x; i <= l; i++) {
244         cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
248 /* compare unsigned byte arrays */
/* CLC: compare l+1 bytes at s1 against s2; returns cc 0 (equal),
   1 (s1 low) or 2 (s1 high).  The early-exit/cc-setting lines are
   elided in this extract -- verify against the full file. */
249 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
255     HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
256     __func__, l, s1, s2);
257     for (i = 0; i <= l; i++) {
258         x = cpu_ldub_data(env, s1 + i);
259         y = cpu_ldub_data(env, s2 + i);
260         HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
275 /* compare logical under mask */
/* CLM: compare the bytes of r1 selected by the 4-bit mask (MSB first)
   against successive bytes at addr; returns the condition code.  The
   comparison/cc lines and the r1 shift per iteration are elided in
   this extract. */
276 uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
282     HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
287         d = cpu_ldub_data(env, addr);
/* extract the current (top) byte of the low word of r1 */
288         r = (r1 & 0xff000000UL) >> 24;
289         HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
/* advance to the next mask bit, keeping only 4 bits */
300         mask = (mask << 1) & 0xf;
/* Compute an effective address from index reg x2, base reg b2 and
   displacement d2; in non-64-bit PSW modes the result is truncated to
   31 (or 24) bits.  Most of the body is elided in this extract. */
307 static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
320     if (!(env->psw.mask & PSW_MASK_64)) {
/* Read general register 'reg' as an address, applying the 31/24-bit
   truncation when the PSW is not in 64-bit mode (truncation lines
   elided in this extract). */
327 static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
329     uint64_t r = env->regs[reg];
332     if (!(env->psw.mask & PSW_MASK_64)) {
339 /* search string (c is byte to search, r2 is string, r1 end of string) */
/* SRST: scan [str, end) for byte c; on a hit the found address is
   written back to r1 (writeback and cc logic elided in this extract). */
340 uint32_t HELPER(srst)(CPUS390XState *env, uint32_t c, uint32_t r1, uint32_t r2)
344     uint64_t str = get_address_31fix(env, r2);
345     uint64_t end = get_address_31fix(env, r1);
347     HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __func__,
348     c, env->regs[r1], env->regs[r2]);
350     for (i = str; i != end; i++) {
351         if (cpu_ldub_data(env, i) == c) {
361 /* unsigned string compare (c is string terminator) */
/* CLST: walk both strings until a terminator byte c or a mismatch is
   found; cc 0 = equal, 1 = s1 low, 2 = s1 high.  Register writeback
   (flagged below as missing 31-bit handling) is partly elided in this
   extract. */
362 uint32_t HELPER(clst)(CPUS390XState *env, uint32_t c, uint32_t r1, uint32_t r2)
364     uint64_t s1 = get_address_31fix(env, r1);
365     uint64_t s2 = get_address_31fix(env, r2);
370 #ifdef CONFIG_USER_ONLY
372     HELPER_LOG("%s: comparing '%s' and '%s'\n",
373     __func__, (char *)g2h(s1), (char *)g2h(s2));
377         v1 = cpu_ldub_data(env, s1);
378         v2 = cpu_ldub_data(env, s2);
379         if ((v1 == c || v2 == c) || (v1 != v2)) {
389     cc = (v1 < v2) ? 1 : 2;
390     /* FIXME: 31-bit mode! */
/* MVPG: move one page (TARGET_PAGE_SIZE bytes) from r2 to r1.
   The r0 condition/key handling required by the architecture is not
   implemented (see XXX). */
398 void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
400     /* XXX missing r0 handling */
402 #ifdef CONFIG_USER_ONLY
403     memmove(g2h(r1), g2h(r2), TARGET_PAGE_SIZE);
405     mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
409 /* string copy (c is string terminator) */
/* MVST: copy bytes from src to dest up to and including terminator c,
   then write the final dest address back to r1.  Loop control and the
   src/terminator test are elided in this extract. */
410 void HELPER(mvst)(CPUS390XState *env, uint32_t c, uint32_t r1, uint32_t r2)
412     uint64_t dest = get_address_31fix(env, r1);
413     uint64_t src = get_address_31fix(env, r2);
417 #ifdef CONFIG_USER_ONLY
419     HELPER_LOG("%s: copy '%s' to 0x%lx\n", __func__, (char *)g2h(src),
424         v = cpu_ldub_data(env, src);
425         cpu_stb_data(env, dest, v);
432     env->regs[r1] = dest; /* FIXME: 31-bit mode! */
435 /* compare and swap 64-bit */
/* CSG: if the doubleword at a2 equals r1, store r3 there; the cc and
   the old-value return path are elided in this extract.  Not atomic
   with respect to other CPUs (see FIXME). */
436 uint64_t HELPER(csg)(CPUS390XState *env, uint64_t r1, uint64_t a2, uint64_t r3)
438     /* FIXME: locking? */
439     uint64_t v2 = cpu_ldq_data(env, a2);
441         cpu_stq_data(env, a2, r3);
450 /* compare double and swap 64-bit */
/* CDSG: compare the 128-bit pair r1:r1+1 with the quadword at a2; on
   match store r3:r3+1 there, otherwise load the memory value into
   r1:r1+1.  The cc assignments are elided in this extract.  Not atomic
   with respect to other CPUs (see FIXME). */
451 uint32_t HELPER(cdsg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
453     /* FIXME: locking? */
455     uint64_t v2_hi = cpu_ldq_data(env, a2);
456     uint64_t v2_lo = cpu_ldq_data(env, a2 + 8);
457     uint64_t v1_hi = env->regs[r1];
458     uint64_t v1_lo = env->regs[r1 + 1];
460     if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
462         cpu_stq_data(env, a2, env->regs[r3]);
463         cpu_stq_data(env, a2 + 8, env->regs[r3 + 1]);
/* mismatch: return the current memory contents in r1:r1+1 */
466         env->regs[r1] = v2_hi;
467         env->regs[r1 + 1] = v2_lo;
473 /* compare and swap 32-bit */
/* CS: if the word at a2 equals the low 32 bits of r1, store the low
   32 bits of r3 there; cc and the old-value return path are elided in
   this extract.  Not atomic with respect to other CPUs (see FIXME). */
474 uint64_t HELPER(cs)(CPUS390XState *env, uint64_t r1, uint64_t a2, uint64_t r3)
476     /* FIXME: locking? */
477     uint32_t v2 = cpu_ldl_data(env, a2);
478     if ((uint32_t)r1 == v2) {
479         cpu_stl_data(env, a2, (uint32_t)r3);
/* ICM core: insert bytes from 'address' into the low word of r1 under
   the 4-bit mask (MSB first), returning the condition code.  cc is 1
   if the first inserted byte has its top bit set, 2 if any inserted
   byte is nonzero otherwise -- the branch bodies are elided in this
   extract; verify against the full file. */
488 static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
491     int pos = 24; /* top of the lower half of r1 */
492     uint64_t rmask = 0xff000000ULL;
/* clear then insert the selected byte at the current position */
499             env->regs[r1] &= ~rmask;
500             val = cpu_ldub_data(env, address);
501             if ((val & 0x80) && !ccd) {
505             if (val && cc == 0) {
508             env->regs[r1] |= (uint64_t)val << pos;
/* advance to the next mask bit, keeping only 4 bits */
511         mask = (mask << 1) & 0xf;
519 /* execute instruction
520    this instruction executes an insn modified with the contents of r1
521    it does not change the executed instruction in memory
522    it does not change the program counter
523    in other words: tricky...
524    currently implemented by interpreting the cases it is most commonly used in
/* EX: interprets only the common target encodings -- SS-format
   MVC/CLC/XC/TR (length byte taken from v1), SVC, and ICM -- and
   aborts on anything else.  Several length/field extractions (l, d2,
   r3 second operand) are elided in this extract. */
526 uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
527     uint64_t addr, uint64_t ret)
529     uint16_t insn = cpu_lduw_code(env, addr);
531     HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
/* SS-format storage-to-storage instruction (0xd0..0xdf opcodes) */
533     if ((insn & 0xf0ff) == 0xd000) {
534         uint32_t l, insn2, b1, b2, d1, d2;
537         insn2 = cpu_ldl_code(env, addr + 2);
538         b1 = (insn2 >> 28) & 0xf;
539         b2 = (insn2 >> 12) & 0xf;
540         d1 = (insn2 >> 16) & 0xfff;
542         switch (insn & 0xf00) {
544             helper_mvc(env, l, get_address(env, 0, b1, d1),
545             get_address(env, 0, b2, d2));
548             cc = helper_clc(env, l, get_address(env, 0, b1, d1),
549             get_address(env, 0, b2, d2));
552             cc = helper_xc(env, l, get_address(env, 0, b1, d1),
553             get_address(env, 0, b2, d2));
556             helper_tr(env, l, get_address(env, 0, b1, d1),
557             get_address(env, 0, b2, d2));
563     } else if ((insn & 0xff00) == 0x0a00) {
564         /* supervisor call */
565         HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
/* deliver the SVC as if it were executed at the EX site */
566         env->psw.addr = ret - 4;
567         env->int_svc_code = (insn | v1) & 0xff;
568         env->int_svc_ilen = 4;
569         helper_exception(env, EXCP_SVC);
570     } else if ((insn & 0xff00) == 0xbf00) {
/* ICM (RS format) */
571         uint32_t insn2, r1, r3, b2, d2;
573         insn2 = cpu_ldl_code(env, addr + 2);
574         r1 = (insn2 >> 20) & 0xf;
575         r3 = (insn2 >> 16) & 0xf;
576         b2 = (insn2 >> 12) & 0xf;
578         cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
581         cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
587 /* load access registers r1 to r3 from memory at a2 */
/* Registers wrap modulo 16; the a2 increment and the r3 termination
   test are elided in this extract. */
588 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
592     for (i = r1;; i = (i + 1) % 16) {
593         env->aregs[i] = cpu_ldl_data(env, a2);
602 /* store access registers r1 to r3 in memory at a2 */
/* Registers wrap modulo 16; the a2 increment and the r3 termination
   test are elided in this extract. */
603 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
607     for (i = r1;; i = (i + 1) % 16) {
608         cpu_stl_data(env, a2, env->aregs[i]);
/* MVCL: move long.  Lengths live in the low 24 bits of r1+1/r2+1, the
   pad byte in bits 24-31 of r2.  Copies min(destlen, srclen) bytes,
   pads the remainder of dest with 'pad', and writes the updated
   addresses/lengths back.  cc assignments and the destlen clamp are
   elided in this extract. */
618 uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
620     uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
621     uint64_t dest = get_address_31fix(env, r1);
622     uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
623     uint64_t src = get_address_31fix(env, r2);
624     uint8_t pad = src >> 24;
628     if (destlen == srclen) {
630     } else if (destlen < srclen) {
/* operands are truncated to the shorter length for the copy */
636     if (srclen > destlen) {
640     for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
641         v = cpu_ldub_data(env, src);
642         cpu_stb_data(env, dest, v);
/* pad out the rest of the destination */
645     for (; destlen; dest++, destlen--) {
646         cpu_stb_data(env, dest, pad);
649     env->regs[r1 + 1] = destlen;
650     /* can't use srclen here, we trunc'ed it */
651     env->regs[r2 + 1] -= src - env->regs[r2];
652     env->regs[r1] = dest;
658 /* move long extended another memcopy insn with more bells and whistles */
/* MVCLE: like MVCL but with full-register lengths, pad byte taken from
   a2, and 32-bit truncation of the lengths outside 64-bit mode.  cc
   assignments and the srclen clamp are elided in this extract. */
659 uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
662     uint64_t destlen = env->regs[r1 + 1];
663     uint64_t dest = env->regs[r1];
664     uint64_t srclen = env->regs[r3 + 1];
665     uint64_t src = env->regs[r3];
666     uint8_t pad = a2 & 0xff;
670     if (!(env->psw.mask & PSW_MASK_64)) {
671         destlen = (uint32_t)destlen;
672         srclen = (uint32_t)srclen;
677     if (destlen == srclen) {
679     } else if (destlen < srclen) {
685     if (srclen > destlen) {
/* copy the overlapping extent byte by byte */
689     for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
690         v = cpu_ldub_data(env, src);
691         cpu_stb_data(env, dest, v);
/* pad out the rest of the destination */
694     for (; destlen; dest++, destlen--) {
695         cpu_stb_data(env, dest, pad);
698     env->regs[r1 + 1] = destlen;
699     /* can't use srclen here, we trunc'ed it */
700     /* FIXME: 31-bit mode! */
701     env->regs[r3 + 1] -= src - env->regs[r3];
702     env->regs[r1] = dest;
708 /* compare logical long extended memcompare insn with padding */
/* CLCLE: compare two operands of possibly different length, the
   shorter one padded with 'pad' (from a2).  Returns cc 0/1/2; the
   early-exit on mismatch and the srclen clamp are elided in this
   extract. */
709 uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
712     uint64_t destlen = env->regs[r1 + 1];
713     uint64_t dest = get_address_31fix(env, r1);
714     uint64_t srclen = env->regs[r3 + 1];
715     uint64_t src = get_address_31fix(env, r3);
716     uint8_t pad = a2 & 0xff;
717     uint8_t v1 = 0, v2 = 0;
/* both operands empty: nothing to compare */
720     if (!(destlen || srclen)) {
724     if (srclen > destlen) {
728     for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
/* exhausted operand compares as the pad byte */
729         v1 = srclen ? cpu_ldub_data(env, src) : pad;
730         v2 = destlen ? cpu_ldub_data(env, dest) : pad;
732             cc = (v1 < v2) ? 1 : 2;
737     env->regs[r1 + 1] = destlen;
738     /* can't use srclen here, we trunc'ed it */
739     env->regs[r3 + 1] -= src - env->regs[r3];
740     env->regs[r1] = dest;
/* CKSM: 32-bit ones'-complement-style checksum of src_len bytes at
   src, seeded with the low word of r1.  Work is capped at 8k per call
   so interrupts stay serviceable; cc=3 signals "call me again".
   The final combined return value is elided in this extract. */
747 uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
748     uint64_t src, uint64_t src_len)
750     uint64_t max_len, len;
751     uint64_t cksm = (uint32_t)r1;
753     /* Lest we fail to service interrupts in a timely manner, limit the
754        amount of work we're willing to do.  For now, lets cap at 8k. */
755     max_len = (src_len > 0x2000 ? 0x2000 : src_len);
757     /* Process full words as available. */
758     for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
759         cksm += (uint32_t)cpu_ldl_data(env, src);
/* handle the 1-3 byte tail, left-aligned into a 32-bit word */
762     switch (max_len - len) {
764         cksm += cpu_ldub_data(env, src) << 24;
768         cksm += cpu_lduw_data(env, src) << 16;
772         cksm += cpu_lduw_data(env, src) << 16;
773         cksm += cpu_ldub_data(env, src + 2) << 8;
778     /* Fold the carry from the checksum.  Note that we can see carry-out
779        during folding more than once (but probably not more than twice). */
780     while (cksm > 0xffffffffull) {
781         cksm = (uint32_t)cksm + (cksm >> 32);
784     /* Indicate whether or not we've processed everything. */
785     env->cc_op = (len == src_len ? 0 : 3);
787     /* Return both cksm and processed length. */
/* UNPK: unpack a packed-decimal operand at src into zoned format at
   dest.  Lengths are encoded in one byte: dest length in the high
   nibble, src length in the low nibble.  The walk is right-to-left;
   pointer decrements and the nibble-zoning are partly elided in this
   extract. */
792 void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
795     int len_dest = len >> 4;
796     int len_src = len & 0xf;
798     int second_nibble = 0;
803     /* last byte is special, it only flips the nibbles */
804     b = cpu_ldub_data(env, src);
805     cpu_stb_data(env, dest, (b << 4) | (b >> 4));
809     /* now pad every nibble with 0xf0 */
811     while (len_dest > 0) {
812         uint8_t cur_byte = 0;
815             cur_byte = cpu_ldub_data(env, src);
821         /* only advance one nibble at a time */
827         second_nibble = !second_nibble;
/* keep only the digit nibble; the 0xf zone is OR'd in (elided) */
830         cur_byte = (cur_byte & 0xf);
834         cpu_stb_data(env, dest, cur_byte);
/* TR: translate len+1 bytes in place -- each byte of 'array' is
   replaced by the byte at trans[byte]. */
838 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
843     for (i = 0; i <= len; i++) {
844         uint8_t byte = cpu_ldub_data(env, array + i);
845         uint8_t new_byte = cpu_ldub_data(env, trans + byte);
847         cpu_stb_data(env, array + i, new_byte);
851 #if !defined(CONFIG_USER_ONLY)
/* LCTLG: load 64-bit control registers r1..r3 (wrapping mod 16) from
   consecutive doublewords at a2.  The r3 termination test and any TLB
   flush on control-register change are elided in this extract. */
852 void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
857     for (i = r1;; i = (i + 1) % 16) {
858         env->cregs[i] = cpu_ldq_data(env, src);
859         HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
860         i, src, env->cregs[i]);
861         src += sizeof(uint64_t);
/* LCTL: load the low 32 bits of control registers r1..r3 (wrapping
   mod 16) from consecutive words at a2, preserving the high halves.
   Termination test elided in this extract. */
871 void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
876     for (i = r1;; i = (i + 1) % 16) {
877         env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
878         cpu_ldl_data(env, src);
879         src += sizeof(uint32_t);
/* STCTG: store 64-bit control registers r1..r3 (wrapping mod 16) to
   consecutive doublewords at a2.  Termination test elided. */
889 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
894     for (i = r1;; i = (i + 1) % 16) {
895         cpu_stq_data(env, dest, env->cregs[i]);
896         dest += sizeof(uint64_t);
/* STCTL: store the low 32 bits of control registers r1..r3 (wrapping
   mod 16) to consecutive words at a2.  Termination test elided. */
904 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
909     for (i = r1;; i = (i + 1) % 16) {
910         cpu_stl_data(env, dest, env->cregs[i]);
911         dest += sizeof(uint32_t);
/* TPROT: test protection of a1 with key from a2; body elided in this
   extract (presumably a stub returning a fixed cc -- verify). */
919 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
926 /* insert storage key extended */
/* ISKE: return the storage key for the page containing addr; behaviour
   for addresses beyond ram_size is elided in this extract.  NOTE(review):
   the bound check uses '>' rather than '>=' -- addr == ram_size slips
   through; verify against the full file/upstream. */
927 uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
929     uint64_t addr = get_address(env, 0, 0, r2);
931     if (addr > ram_size) {
935     return env->storage_keys[addr / TARGET_PAGE_SIZE];
938 /* set storage key extended */
/* SSKE: set the storage key of the page containing addr from the low
   bits of r1; out-of-range behaviour elided (same '>' vs '>=' concern
   as ISKE). */
939 void HELPER(sske)(CPUS390XState *env, uint32_t r1, uint64_t r2)
941     uint64_t addr = get_address(env, 0, 0, r2);
943     if (addr > ram_size) {
947     env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
950 /* reset reference bit extended */
/* RRBE: clear the reference (R) bit of the page's storage key and
   return a cc derived from the previous R and C bits (mapping listed
   below; the cc computation itself is elided in this extract). */
951 uint32_t HELPER(rrbe)(CPUS390XState *env, uint32_t r1, uint64_t r2)
960     key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
961     re = key & (SK_R | SK_C);
962     env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
967      * 0  Reference bit zero; change bit zero
968      * 1  Reference bit zero; change bit one
969      * 2  Reference bit one; change bit zero
970      * 3  Reference bit one; change bit one
976 /* compare and swap and purge */
/* CSP: compare-and-swap the word at a2 (address from r2, low 2 bits
   are flags) against the low word of r1; on success optionally flush
   the TLB/ALB, on failure load the memory value into r1.  cc logic
   and the tlb_flush call are elided in this extract.  Not atomic with
   respect to other CPUs. */
977 uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint32_t r2)
980     uint32_t o1 = env->regs[r1];
981     uint64_t a2 = get_address_31fix(env, r2) & ~3ULL;
982     uint32_t o2 = cpu_ldl_data(env, a2);
985         cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
986         if (env->regs[r2] & 0x3) {
987             /* flush TLB / ALB */
/* mismatch: return the current memory word in the low half of r1 */
992         env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
/* Cross-address-space copy used by MVCS/MVCP: translate a1 in mode1
   (write) and a2 in mode2 (read), then copy l bytes via physical
   accesses, recursing when either operand crosses a page boundary.
   The l <= 0 / l > 256 special-case bodies and the cc return are
   elided in this extract. */
999 static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
1000     uint64_t mode1, uint64_t a2, uint64_t mode2)
1002     target_ulong src, dest;
1003     int flags, cc = 0, i;
/* per the architecture, at most 256 bytes are moved per unit of op */
1007     } else if (l > 256) {
1013     if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
1016     dest |= a1 & ~TARGET_PAGE_MASK;
1018     if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
1021     src |= a2 & ~TARGET_PAGE_MASK;
1023     /* XXX replace w/ memcpy */
1024     for (i = 0; i < l; i++) {
1025         /* XXX be more clever */
1026         if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
1027         (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
/* crossed a page: retranslate via a recursive call for the remainder */
1028             mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
1031         stb_phys(dest + i, ldub_phys(src + i));
/* MVCS: move to secondary -- copy from primary space to secondary. */
1037 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1039     HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1040     __func__, l, a1, a2);
1042     return mvc_asc(env, l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
/* MVCP: move to primary -- copy from secondary space to primary. */
1045 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1047     HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1048     __func__, l, a1, a2);
1050     return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
1053 /* invalidate pte */
/* IPTE: mark the PTE at pte_addr invalid and flush the matching TLB
   entry (plus the 31-bit aliased page).  Relies on Linux guest
   behaviour in two places -- see the XXX notes below.  The pte load
   is elided in this extract. */
1054 void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
1056     uint64_t page = vaddr & TARGET_PAGE_MASK;
1059     /* XXX broadcast to other CPUs */
1061     /* XXX Linux is nice enough to give us the exact pte address.
1062        According to spec we'd have to find it out ourselves */
1063     /* XXX Linux is fine with overwriting the pte, the spec requires
1064        us to only set the invalid bit */
1065     stq_phys(pte_addr, pte | _PAGE_INVALID);
1067     /* XXX we exploit the fact that Linux passes the exact virtual
1068        address here - it's not obliged to! */
1069     tlb_flush_page(env, page);
1071     /* XXX 31-bit hack */
1072     if (page & 0x80000000) {
1073         tlb_flush_page(env, page & ~0x80000000);
1075         tlb_flush_page(env, page | 0x80000000);
1079 /* flush local tlb */
/* PTLB: purge this CPU's TLB; the tlb_flush() call is elided in this
   extract. */
1080 void HELPER(ptlb)(CPUS390XState *env)
1085 /* store using real address */
1086 void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint32_t v1)
1088 stw_phys(get_address(env, 0, 0, addr), v1);
1091 /* load real address */
1092 uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
1095 int old_exc = env->exception_index;
1096 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
1100 /* XXX incomplete - has more corner cases */
1101 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
1102 program_interrupt(env, PGM_SPECIAL_OP, 2);
1105 env->exception_index = old_exc;
1106 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
1109 if (env->exception_index == EXCP_PGM) {
1110 ret = env->int_pgm_code | 0x80000000;
1112 ret |= addr & ~TARGET_PAGE_MASK;
1114 env->exception_index = old_exc;