/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*****************************************************************************/
26 #if !defined(CONFIG_USER_ONLY)
27 #include "exec/softmmu_exec.h"
29 #define MMUSUFFIX _mmu
32 #include "exec/softmmu_template.h"
35 #include "exec/softmmu_template.h"
38 #include "exec/softmmu_template.h"
41 #include "exec/softmmu_template.h"
/* Refill the softmmu TLB for 'addr'; on translation failure raise a guest
   exception.  NOTE(review): this copy is truncated -- the 'retaddr'
   parameter line, the local 'ret' declaration, and the trailing
   cpu_loop_exit()/closing braces are missing from the visible text. */
43 /* try to fill the TLB and return an exception if error. If retaddr is
44    NULL, it means that the function was called in C code (i.e. not
45    from generated code or from helper.c) */
46 /* XXX: fix it to restore all registers */
47 void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
52     ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
53     if (unlikely(ret != 0)) {
54         if (likely(retaddr)) {
55             /* now we have a real cpu fault */
56             cpu_restore_state(env, retaddr);
/* Debug tracing: HELPER_LOG() forwards to qemu_log() when DEBUG_HELPER is
   defined and expands to nothing otherwise.  NOTE(review): the #ifdef/#else/
   #endif lines selecting between the two definitions are missing here. */
64 /* #define DEBUG_HELPER */
66 #define HELPER_LOG(x...) qemu_log(x)
68 #define HELPER_LOG(x...)
/* Fast path for MVC used as a memset: translate 'dest' once, map the
   physical page and memset 'byte' into it directly.  If translation fails,
   cpu_stb_data() re-triggers the fault on the guest side, so the
   cpu_abort() below should be unreachable.  NOTE(review): truncated --
   parameter list tail, local declarations and closing braces missing. */
71 #ifndef CONFIG_USER_ONLY
72 static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
78     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
81     if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
82         cpu_stb_data(env, dest, byte);
83         cpu_abort(env, "should never reach here");
85     dest_phys |= dest & ~TARGET_PAGE_MASK;
87     dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
89     memset(dest_p, byte, len);
91     cpu_physical_memory_unmap(dest_p, 1, len, len);
/* Fast path for MVC as a straight copy: translate source and destination,
   map both physical ranges and memmove() between them.  The dummy
   cpu_stb_data()/cpu_ldub_data() calls before each cpu_abort() exist to
   deliver the real guest fault when mmu_translate() fails.
   NOTE(review): truncated -- locals and closing braces missing. */
94 static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
102     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
105     if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
106         cpu_stb_data(env, dest, 0);
107         cpu_abort(env, "should never reach here");
109     dest_phys |= dest & ~TARGET_PAGE_MASK;
111     if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
112         cpu_ldub_data(env, src);
113         cpu_abort(env, "should never reach here");
115     src_phys |= src & ~TARGET_PAGE_MASK;
117     dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
118     src_p = cpu_physical_memory_map(src_phys, &len, 0);
120     memmove(dest_p, src_p, len);
122     cpu_physical_memory_unmap(dest_p, 1, len, len);
123     cpu_physical_memory_unmap(src_p, 0, len, len);
/* NC: byte-wise AND of two storage operands, dest[i] &= src[i] for
   0 <= i <= l (l is length-minus-one, per the instruction encoding).
   Returns the condition code -- presumably 0 if the result is all zero,
   1 otherwise; the cc computation lines are missing from this copy. */
128 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
135     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
136                __func__, l, dest, src);
137     for (i = 0; i <= l; i++) {
138         x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
142         cpu_stb_data(env, dest + i, x);
/* XC: byte-wise XOR of two storage operands.  XOR with itself is used by
   guests as memset(0); for operands > 32 bytes that stay within one page
   there is a fast memset path (softmmu) / direct host memset (user mode).
   NOTE(review): truncated -- the cc computation and several braces/returns
   are missing from this copy. */
148 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
155     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
156                __func__, l, dest, src);
158 #ifndef CONFIG_USER_ONLY
159     /* xor with itself is the same as memset(0) */
160     if ((l > 32) && (src == dest) &&
161         (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
162         mvc_fast_memset(env, l + 1, dest, 0);
167         memset(g2h(dest), 0, l + 1);
172     for (i = 0; i <= l; i++) {
173         x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
177         cpu_stb_data(env, dest + i, x);
/* OC: byte-wise OR of two storage operands, dest[i] |= src[i] for
   0 <= i <= l.  Mirrors HELPER(nc) above; the condition-code computation
   lines are missing from this copy. */
183 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
190     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
191                __func__, l, dest, src);
192     for (i = 0; i <= l; i++) {
193         x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
197         cpu_stb_data(env, dest + i, x);
/* MVC: move l+1 bytes from src to dest.  The special case
   dest == src + 1 is the architectural idiom for propagating one byte
   (memset); same-page operands take the fast memset/memmove paths.
   The generic tail copies 8 bytes at a time, then finishes byte-wise --
   byte order matters for overlapping operands.  NOTE(review): truncated --
   several condition lines, the 'x' bookkeeping and returns are missing. */
203 void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
207     uint32_t l_64 = (l + 1) / 8;
209     HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
210                __func__, l, dest, src);
212 #ifndef CONFIG_USER_ONLY
214         (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
215         (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
216         if (dest == (src + 1)) {
217             mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
219         } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
220             mvc_fast_memmove(env, l + 1, dest, src);
225     if (dest == (src + 1)) {
226         memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
229         memmove(g2h(dest), g2h(src), l + 1);
234     /* handle the parts that fit into 8-byte loads/stores */
235     if (dest != (src + 1)) {
236         for (i = 0; i < l_64; i++) {
237             cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
242     /* slow version crossing pages with byte accesses */
243     for (i = x; i <= l; i++) {
244         cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
/* CLC: compare l+1 unsigned bytes at s1 against s2, returning a condition
   code (0 equal, 1 first-low, 2 first-high per the architecture).
   NOTE(review): truncated -- the comparison branches and return are
   missing from this copy. */
248 /* compare unsigned byte arrays */
249 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
255     HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
256                __func__, l, s1, s2);
257     for (i = 0; i <= l; i++) {
258         x = cpu_ldub_data(env, s1 + i);
259         y = cpu_ldub_data(env, s2 + i);
260         HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
/* CLM: compare bytes of r1 (selected top-down by the 4-bit mask) against
   successive bytes at addr.  Each iteration shifts r1 left so the next
   register byte lands in bits 24..31, and shifts the mask up by one.
   NOTE(review): truncated -- the mask test, comparison/cc logic and loop
   framing are missing from this copy. */
275 /* compare logical under mask */
276 uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
282     HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
287             d = cpu_ldub_data(env, addr);
288             r = (r1 & 0xff000000UL) >> 24;
289             HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
300         mask = (mask << 1) & 0xf;
/* Compute an effective address from index reg x2, base reg b2 and
   displacement d2; when the PSW is not in 64-bit mode the result is
   presumably truncated to 24/31 bits -- the masking lines are missing
   from this copy. */
307 static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
320     if (!(env->psw.mask & PSW_MASK_64)) {
/* Read general register 'reg' as an address, applying the same
   non-64-bit-mode fixup as get_address() (masking lines missing here). */
327 static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
329     uint64_t r = env->regs[reg];
332     if (!(env->psw.mask & PSW_MASK_64)) {
/* SRST: scan storage from *r2 up to (but not including) *r1 for byte 'c'.
   NOTE(review): truncated -- the found/not-found register updates and
   condition-code returns are missing from this copy. */
339 /* search string (c is byte to search, r2 is string, r1 end of string) */
340 uint32_t HELPER(srst)(CPUS390XState *env, uint32_t c, uint32_t r1, uint32_t r2)
344     uint64_t str = get_address_31fix(env, r2);
345     uint64_t end = get_address_31fix(env, r1);
347     HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __func__,
348                c, env->regs[r1], env->regs[r2]);
350     for (i = str; i != end; i++) {
351         if (cpu_ldub_data(env, i) == c) {
/* CLST: compare two storage strings terminated by byte 'c'; loop until a
   terminator or a mismatch is seen, then derive the condition code
   (1 = first lower, 2 = first higher).  NOTE(review): truncated -- the
   equal/terminator branches, register write-back and return are missing. */
361 /* unsigned string compare (c is string terminator) */
362 uint32_t HELPER(clst)(CPUS390XState *env, uint32_t c, uint32_t r1, uint32_t r2)
364     uint64_t s1 = get_address_31fix(env, r1);
365     uint64_t s2 = get_address_31fix(env, r2);
370 #ifdef CONFIG_USER_ONLY
372         HELPER_LOG("%s: comparing '%s' and '%s'\n",
373                    __func__, (char *)g2h(s1), (char *)g2h(s2));
377         v1 = cpu_ldub_data(env, s1);
378         v2 = cpu_ldub_data(env, s2);
379         if ((v1 == c || v2 == c) || (v1 != v2)) {
389     cc = (v1 < v2) ? 1 : 2;
390     /* FIXME: 31-bit mode! */
/* MVPG: copy one page from r2 to r1.  User mode copies byte-by-byte;
   softmmu uses the fast memmove path.  r0 (which carries access-key and
   fault-handling options architecturally) is not handled, per the XXX. */
398 void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
400     /* XXX missing r0 handling */
401 #ifdef CONFIG_USER_ONLY
404     for (i = 0; i < TARGET_PAGE_SIZE; i++) {
405         cpu_stb_data(env, r1 + i, cpu_ldub_data(env, r2 + i));
408     mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
/* MVST: copy a string terminated by byte 'c' from *r2 to *r1, then leave
   the final destination address in r1 (31-bit mode handling still FIXME).
   NOTE(review): truncated -- the loop framing, terminator test and
   pointer increments are missing from this copy. */
412 /* string copy (c is string terminator) */
413 void HELPER(mvst)(CPUS390XState *env, uint32_t c, uint32_t r1, uint32_t r2)
415     uint64_t dest = get_address_31fix(env, r1);
416     uint64_t src = get_address_31fix(env, r2);
420 #ifdef CONFIG_USER_ONLY
422         HELPER_LOG("%s: copy '%s' to 0x%lx\n", __func__, (char *)g2h(src),
427         v = cpu_ldub_data(env, src);
428         cpu_stb_data(env, dest, v);
435     env->regs[r1] = dest; /* FIXME: 31-bit mode! */
/* CSG: 64-bit compare-and-swap at a2 -- if the old value equals r1, store
   r3.  Not atomic against other vCPUs (see FIXME).  NOTE(review):
   truncated -- the comparison, cc setting and return are missing. */
438 /* compare and swap 64-bit */
439 uint64_t HELPER(csg)(CPUS390XState *env, uint64_t r1, uint64_t a2, uint64_t r3)
441     /* FIXME: locking? */
442     uint64_t v2 = cpu_ldq_data(env, a2);
444         cpu_stq_data(env, a2, r3);
/* CDSG: 128-bit compare-double-and-swap.  Compares the even/odd register
   pair r1/r1+1 against 16 bytes at a2; on match stores pair r3/r3+1,
   otherwise loads the memory value back into the r1 pair.  Not atomic
   (FIXME).  The cc assignments and return are missing from this copy. */
453 /* compare double and swap 64-bit */
454 uint32_t HELPER(cdsg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
456     /* FIXME: locking? */
458     uint64_t v2_hi = cpu_ldq_data(env, a2);
459     uint64_t v2_lo = cpu_ldq_data(env, a2 + 8);
460     uint64_t v1_hi = env->regs[r1];
461     uint64_t v1_lo = env->regs[r1 + 1];
463     if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
465         cpu_stq_data(env, a2, env->regs[r3]);
466         cpu_stq_data(env, a2 + 8, env->regs[r3 + 1]);
469         env->regs[r1] = v2_hi;
470         env->regs[r1 + 1] = v2_lo;
/* CS: 32-bit compare-and-swap at a2 -- store low 32 bits of r3 when the
   memory word equals the low 32 bits of r1.  Not atomic (FIXME).  The
   mismatch branch, cc handling and return are missing from this copy. */
476 /* compare and swap 32-bit */
477 uint64_t HELPER(cs)(CPUS390XState *env, uint64_t r1, uint64_t a2, uint64_t r3)
479     /* FIXME: locking? */
480     uint32_t v2 = cpu_ldl_data(env, a2);
481     if ((uint32_t)r1 == v2) {
482         cpu_stl_data(env, a2, (uint32_t)r3);
/* ICM core: insert successive bytes from 'address' into the low word of
   r1 top-down, one byte per set mask bit.  'pos'/'rmask' track the current
   byte slot; cc reflects the sign bit of the first inserted byte ('ccd'
   presumably guards that first-byte test -- confirm against the missing
   lines) and whether any inserted byte was nonzero. */
491 static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
494     int pos = 24; /* top of the lower half of r1 */
495     uint64_t rmask = 0xff000000ULL;
502             env->regs[r1] &= ~rmask;
503             val = cpu_ldub_data(env, address);
504             if ((val & 0x80) && !ccd) {
508             if (val && cc == 0) {
511             env->regs[r1] |= (uint64_t)val << pos;
514         mask = (mask << 1) & 0xf;
/* EX: interpret the target instruction of EXECUTE.  Only the commonly
   executed forms are handled: SS-format storage ops (MVC/CLC/XC/TR, with
   the length byte OR-ed from r1), SVC, and ICM (0xbf).  Anything else
   aborts.  NOTE(review): truncated -- the 'l' computation from v1, d2
   extraction, several case labels/breaks and the final return are
   missing from this copy. */
522 /* execute instruction
523    this instruction executes an insn modified with the contents of r1
524    it does not change the executed instruction in memory
525    it does not change the program counter
526    in other words: tricky...
527    currently implemented by interpreting the cases it is most commonly used in
529 uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
530                     uint64_t addr, uint64_t ret)
532     uint16_t insn = cpu_lduw_code(env, addr);
534     HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
536     if ((insn & 0xf0ff) == 0xd000) {
537         uint32_t l, insn2, b1, b2, d1, d2;
540         insn2 = cpu_ldl_code(env, addr + 2);
541         b1 = (insn2 >> 28) & 0xf;
542         b2 = (insn2 >> 12) & 0xf;
543         d1 = (insn2 >> 16) & 0xfff;
545         switch (insn & 0xf00) {
547             helper_mvc(env, l, get_address(env, 0, b1, d1),
548                        get_address(env, 0, b2, d2));
551             cc = helper_clc(env, l, get_address(env, 0, b1, d1),
552                             get_address(env, 0, b2, d2));
555             cc = helper_xc(env, l, get_address(env, 0, b1, d1),
556                            get_address(env, 0, b2, d2));
559             helper_tr(env, l, get_address(env, 0, b1, d1),
560                       get_address(env, 0, b2, d2));
566     } else if ((insn & 0xff00) == 0x0a00) {
567         /* supervisor call */
568         HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
569         env->psw.addr = ret - 4;
570         env->int_svc_code = (insn | v1) & 0xff;
571         env->int_svc_ilen = 4;
572         helper_exception(env, EXCP_SVC);
573     } else if ((insn & 0xff00) == 0xbf00) {
574         uint32_t insn2, r1, r3, b2, d2;
576         insn2 = cpu_ldl_code(env, addr + 2);
577         r1 = (insn2 >> 20) & 0xf;
578         r3 = (insn2 >> 16) & 0xf;
579         b2 = (insn2 >> 12) & 0xf;
581         cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
584         cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
/* LAM: load access registers r1..r3 (wrapping modulo 16) from consecutive
   words at a2.  The a2 increment and the loop-exit test on i == r3 are
   missing from this copy. */
590 /* load access registers r1 to r3 from memory at a2 */
591 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
595     for (i = r1;; i = (i + 1) % 16) {
596         env->aregs[i] = cpu_ldl_data(env, a2);
/* STAM: store access registers r1..r3 (wrapping modulo 16) to consecutive
   words at a2.  The a2 increment and loop-exit test are missing here. */
605 /* store access registers r1 to r3 in memory at a2 */
606 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
610     for (i = r1;; i = (i + 1) % 16) {
611         cpu_stl_data(env, a2, env->aregs[i]);
/* MVCL: move long.  Lengths live in the low 24 bits of the odd registers;
   the pad byte comes from bits 24..31 of the source register.  Copies the
   common prefix, pads the remaining destination, then writes the final
   addresses/lengths back.  NOTE(review): truncated -- the cc assignments,
   the srclen truncation when srclen > destlen, and the return are
   missing from this copy. */
621 uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
623     uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
624     uint64_t dest = get_address_31fix(env, r1);
625     uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
626     uint64_t src = get_address_31fix(env, r2);
627     uint8_t pad = src >> 24;
631     if (destlen == srclen) {
633     } else if (destlen < srclen) {
639     if (srclen > destlen) {
643     for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
644         v = cpu_ldub_data(env, src);
645         cpu_stb_data(env, dest, v);
648     for (; destlen; dest++, destlen--) {
649         cpu_stb_data(env, dest, pad);
652     env->regs[r1 + 1] = destlen;
653     /* can't use srclen here, we trunc'ed it */
654     env->regs[r2 + 1] -= src - env->regs[r2];
655     env->regs[r1] = dest;
/* MVCLE: move long extended.  Like MVCL but full-register lengths, pad
   byte taken from a2, and 32-bit truncation of the lengths when the PSW
   is not in 64-bit mode.  NOTE(review): truncated -- cc assignments,
   srclen truncation and return are missing from this copy. */
661 /* move long extended another memcopy insn with more bells and whistles */
662 uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
665     uint64_t destlen = env->regs[r1 + 1];
666     uint64_t dest = env->regs[r1];
667     uint64_t srclen = env->regs[r3 + 1];
668     uint64_t src = env->regs[r3];
669     uint8_t pad = a2 & 0xff;
673     if (!(env->psw.mask & PSW_MASK_64)) {
674         destlen = (uint32_t)destlen;
675         srclen = (uint32_t)srclen;
680     if (destlen == srclen) {
682     } else if (destlen < srclen) {
688     if (srclen > destlen) {
692     for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
693         v = cpu_ldub_data(env, src);
694         cpu_stb_data(env, dest, v);
697     for (; destlen; dest++, destlen--) {
698         cpu_stb_data(env, dest, pad);
701     env->regs[r1 + 1] = destlen;
702     /* can't use srclen here, we trunc'ed it */
703     /* FIXME: 31-bit mode! */
704     env->regs[r3 + 1] -= src - env->regs[r3];
705     env->regs[r1] = dest;
/* CLCLE: compare logical long extended.  Compares the two operands byte
   by byte, substituting the pad byte (from a2) once the shorter operand
   runs out, then writes the advanced addresses/remaining lengths back.
   NOTE(review): truncated -- the early-out on mismatch, cc settings and
   return are missing from this copy. */
711 /* compare logical long extended memcompare insn with padding */
712 uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
715     uint64_t destlen = env->regs[r1 + 1];
716     uint64_t dest = get_address_31fix(env, r1);
717     uint64_t srclen = env->regs[r3 + 1];
718     uint64_t src = get_address_31fix(env, r3);
719     uint8_t pad = a2 & 0xff;
720     uint8_t v1 = 0, v2 = 0;
723     if (!(destlen || srclen)) {
727     if (srclen > destlen) {
731     for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
732         v1 = srclen ? cpu_ldub_data(env, src) : pad;
733         v2 = destlen ? cpu_ldub_data(env, dest) : pad;
735             cc = (v1 < v2) ? 1 : 2;
740     env->regs[r1 + 1] = destlen;
741     /* can't use srclen here, we trunc'ed it */
742     env->regs[r3 + 1] -= src - env->regs[r3];
743     env->regs[r1] = dest;
/* CKSM: 32-bit ones'-complement-style checksum over up to 8k of the
   operand per invocation (capped so interrupts stay responsive; cc=3
   tells the guest to re-execute for the remainder).  Partial trailing
   words are padded on the right, and 32-bit carry-outs are folded back
   into the sum.  NOTE(review): truncated -- the switch case labels/breaks
   and the final return of (cksm, len) are missing from this copy. */
750 uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
751                       uint64_t src, uint64_t src_len)
753     uint64_t max_len, len;
754     uint64_t cksm = (uint32_t)r1;
756     /* Lest we fail to service interrupts in a timely manner, limit the
757        amount of work we're willing to do.  For now, lets cap at 8k. */
758     max_len = (src_len > 0x2000 ? 0x2000 : src_len);
760     /* Process full words as available.  */
761     for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
762         cksm += (uint32_t)cpu_ldl_data(env, src);
765     switch (max_len - len) {
767         cksm += cpu_ldub_data(env, src) << 24;
771         cksm += cpu_lduw_data(env, src) << 16;
775         cksm += cpu_lduw_data(env, src) << 16;
776         cksm += cpu_ldub_data(env, src + 2) << 8;
781     /* Fold the carry from the checksum.  Note that we can see carry-out
782        during folding more than once (but probably not more than twice).  */
783     while (cksm > 0xffffffffull) {
784         cksm = (uint32_t)cksm + (cksm >> 32);
787     /* Indicate whether or not we've processed everything.  */
788     env->cc_op = (len == src_len ? 0 : 3);
790     /* Return both cksm and processed length.  */
/* UNPK: unpack a packed-decimal operand.  The rightmost byte only has its
   nibbles swapped (sign handling); every remaining source nibble is then
   expanded into a zoned byte with a 0xf0 zone, consuming one nibble per
   destination byte.  NOTE(review): truncated -- pointer adjustments, the
   nibble-select branch and the zone OR are missing from this copy. */
795 void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
798     int len_dest = len >> 4;
799     int len_src = len & 0xf;
801     int second_nibble = 0;
806     /* last byte is special, it only flips the nibbles */
807     b = cpu_ldub_data(env, src);
808     cpu_stb_data(env, dest, (b << 4) | (b >> 4));
812     /* now pad every nibble with 0xf0 */
814     while (len_dest > 0) {
815         uint8_t cur_byte = 0;
818             cur_byte = cpu_ldub_data(env, src);
824         /* only advance one nibble at a time */
830         second_nibble = !second_nibble;
833         cur_byte = (cur_byte & 0xf);
837         cpu_stb_data(env, dest, cur_byte);
/* TR: translate len+1 bytes in place -- each byte of 'array' is replaced
   by the byte at trans[byte] (256-entry translation table). */
841 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
846     for (i = 0; i <= len; i++) {
847         uint8_t byte = cpu_ldub_data(env, array + i);
848         uint8_t new_byte = cpu_ldub_data(env, trans + byte);
850         cpu_stb_data(env, array + i, new_byte);
/* LCTLG: load 64-bit control registers r1..r3 (wrapping mod 16) from
   consecutive doublewords at a2.  The loop-exit test on i == r3 and the
   trailing tlb_flush (control regs affect translation) are missing from
   this copy -- confirm against upstream. */
854 #if !defined(CONFIG_USER_ONLY)
855 void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
860     for (i = r1;; i = (i + 1) % 16) {
861         env->cregs[i] = cpu_ldq_data(env, src);
862         HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
863                    i, src, env->cregs[i]);
864         src += sizeof(uint64_t);
/* LCTL: load the low 32 bits of control registers r1..r3 (wrapping mod
   16) from consecutive words at a2, preserving the high halves.  The
   loop-exit test is missing from this copy. */
874 void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
879     for (i = r1;; i = (i + 1) % 16) {
880         env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
881                         cpu_ldl_data(env, src);
882         src += sizeof(uint32_t);
/* STCTG: store 64-bit control registers r1..r3 (wrapping mod 16) to
   consecutive doublewords at a2.  Loop-exit test missing from this copy. */
892 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
897     for (i = r1;; i = (i + 1) % 16) {
898         cpu_stq_data(env, dest, env->cregs[i]);
899         dest += sizeof(uint64_t);
/* STCTL: store the low 32 bits of control registers r1..r3 (wrapping mod
   16) to consecutive words at a2.  Loop-exit test missing from this copy. */
907 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
912     for (i = r1;; i = (i + 1) % 16) {
913         cpu_stl_data(env, dest, env->cregs[i]);
914         dest += sizeof(uint32_t);
/* TPROT: test protection.  Only the signature is visible here; the body
   (presumably a stub returning a fixed cc) is missing from this copy. */
922 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
/* ISKE: read the storage key of the page containing the r2 address from
   the per-page storage_keys array; out-of-RAM addresses are rejected
   (the early-return body is missing from this copy).  NOTE(review): the
   bound check uses '>' -- addr == ram_size would read one element past
   the array; confirm intended semantics against upstream. */
929 /* insert storage key extended */
930 uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
932     uint64_t addr = get_address(env, 0, 0, r2);
934     if (addr > ram_size) {
938     return env->storage_keys[addr / TARGET_PAGE_SIZE];
/* SSKE: set the storage key of the page containing the r2 address to r1.
   Same '>' vs '>=' boundary caveat as ISKE above; the early-return body
   for out-of-RAM addresses is missing from this copy. */
941 /* set storage key extended */
942 void HELPER(sske)(CPUS390XState *env, uint32_t r1, uint64_t r2)
944     uint64_t addr = get_address(env, 0, 0, r2);
946     if (addr > ram_size) {
950     env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
/* RRBE: clear the reference (R) bit of the storage key for the page at
   r2, returning a cc that encodes the previous R and C bits as listed
   below.  NOTE(review): truncated -- the cc computation from 're' and
   the return are missing from this copy. */
953 /* reset reference bit extended */
954 uint32_t HELPER(rrbe)(CPUS390XState *env, uint32_t r1, uint64_t r2)
963     key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
964     re = key & (SK_R | SK_C);
965     env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
970      * 0  Reference bit zero; change bit zero
971      * 1  Reference bit zero; change bit one
972      * 2  Reference bit one; change bit zero
973      * 3  Reference bit one; change bit one
/* CSP: compare-and-swap-and-purge.  Compares the low word of r1 with the
   word at a2 (address rounded down to a word boundary); on match stores
   the low word of r1+1 and, if requested via the low bits of r2, flushes
   the TLB.  On mismatch the old value replaces the low word of r1.
   NOTE(review): truncated -- the o1 == o2 test, the tlb_flush call and
   cc handling are missing from this copy. */
979 /* compare and swap and purge */
980 uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint32_t r2)
983     uint32_t o1 = env->regs[r1];
984     uint64_t a2 = get_address_31fix(env, r2) & ~3ULL;
985     uint32_t o2 = cpu_ldl_data(env, a2);
988         cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
989         if (env->regs[r2] & 0x3) {
990             /* flush TLB / ALB */
995         env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
/* Copy l bytes from a2 (translated in address space mode2) to a1
   (translated in mode1), used by MVCS/MVCP.  Translates the page-aligned
   bases once and copies byte-wise via physical accesses, recursing when
   the copy crosses a page boundary in either operand.  Lengths > 256 are
   rejected (branch body missing).  NOTE(review): truncated -- the
   exception paths after failed mmu_translate and the return are missing. */
1002 static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
1003                         uint64_t mode1, uint64_t a2, uint64_t mode2)
1005     target_ulong src, dest;
1006     int flags, cc = 0, i;
1010     } else if (l > 256) {
1016     if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
1019     dest |= a1 & ~TARGET_PAGE_MASK;
1021     if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
1024     src |= a2 & ~TARGET_PAGE_MASK;
1026     /* XXX replace w/ memcpy */
1027     for (i = 0; i < l; i++) {
1028         /* XXX be more clever */
1029         if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
1030             (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
1031             mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
1034         stb_phys(dest + i, ldub_phys(src + i));
/* MVCS: move from primary to secondary address space (store into the
   secondary space); thin wrapper around mvc_asc(). */
1040 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1042     HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1043                __func__, l, a1, a2);
1045     return mvc_asc(env, l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
/* MVCP: move from secondary to primary address space (fetch from the
   secondary space); thin wrapper around mvc_asc(). */
1048 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1050     HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1051                __func__, l, a1, a2);
1053     return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
/* IPTE: invalidate the page-table entry at pte_addr and flush the TLB
   entry for the mapped virtual page.  Linux conveniently passes the exact
   PTE address and virtual address, which this implementation exploits
   (see XXX notes).  The 31-bit hack also flushes the alias with bit 31
   toggled, since 31-bit guests may map the page at either alias.
   NOTE(review): truncated -- the pte read preceding the stq_phys and the
   closing braces are missing from this copy. */
1056 /* invalidate pte */
1057 void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
1059     uint64_t page = vaddr & TARGET_PAGE_MASK;
1062     /* XXX broadcast to other CPUs */
1064     /* XXX Linux is nice enough to give us the exact pte address.
1065        According to spec we'd have to find it out ourselves */
1066     /* XXX Linux is fine with overwriting the pte, the spec requires
1067        us to only set the invalid bit */
1068     stq_phys(pte_addr, pte | _PAGE_INVALID);
1070     /* XXX we exploit the fact that Linux passes the exact virtual
1071        address here - it's not obliged to! */
1072     tlb_flush_page(env, page);
1074     /* XXX 31-bit hack */
1075     if (page & 0x80000000) {
1076         tlb_flush_page(env, page & ~0x80000000);
1078         tlb_flush_page(env, page | 0x80000000);
/* PTLB: purge the local TLB.  Body (presumably a tlb_flush call) is
   missing from this copy. */
1082 /* flush local tlb */
1083 void HELPER(ptlb)(CPUS390XState *env)
/* STURA: store v1 at the real address derived from 'addr'.
   NOTE(review): v1 is a uint32_t but stw_phys stores only 16 bits --
   upstream QEMU later changed this to stl_phys; looks like a bug worth
   confirming before touching. */
1088 /* store using real address */
1089 void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint32_t v1)
1091     stw_phys(get_address(env, 0, 0, addr), v1);
1094 /* load real address */
1095 uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
1098 int old_exc = env->exception_index;
1099 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
1103 /* XXX incomplete - has more corner cases */
1104 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
1105 program_interrupt(env, PGM_SPECIAL_OP, 2);
1108 env->exception_index = old_exc;
1109 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
1112 if (env->exception_index == EXCP_PGM) {
1113 ret = env->int_pgm_code | 0x80000000;
1115 ret |= addr & ~TARGET_PAGE_MASK;
1117 env->exception_index = old_exc;