iohandler.o cutils.o iov.o async.o
tools-obj-$(CONFIG_POSIX) += compatfd.o
-qemu-img$(EXESUF): qemu-img.o $(tools-obj-y) $(block-obj-y)
+qemu-img$(EXESUF): qemu-img.o $(tools-obj-y) $(block-obj-y) $(qapi-obj-y) \
+ qapi-visit.o qapi-types.o
qemu-nbd$(EXESUF): qemu-nbd.o $(tools-obj-y) $(block-obj-y)
qemu-io$(EXESUF): qemu-io.o cmd.o $(tools-obj-y) $(block-obj-y)
tci-dis.o: QEMU_CFLAGS += -I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/tci
-# HELPER_CFLAGS is used for all the legacy code compiled with static register
-# variables
-user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
-
-# Note: this is a workaround. The real fix is to avoid compiling
-# cpu_signal_handler() in user-exec.c.
-%/signal.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
-
#########################################################
# Linux user emulator target
if ((i & 63) == 0) {
uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
if (t1 > MAX_WAIT) {
- DPRINTF("big wait: " PRIu64 " milliseconds, %d iterations\n",
+ DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
t1, i);
break;
}
expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
- DPRINTF("ram_save_live: expected(" PRIu64 ") <= max(" PRIu64 ")?\n",
+ DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
expected_time, migrate_max_downtime());
if (expected_time <= migrate_max_downtime()) {
} while (!(flags & RAM_SAVE_FLAG_EOS));
done:
- DPRINTF("Completed load of VM with exit code %d seq iteration " PRIu64 "\n",
- ret, seq_iter);
+ DPRINTF("Completed load of VM with exit code %d seq iteration "
+ "%" PRIu64 "\n", ret, seq_iter);
return ret;
}
return -EOVERFLOW;
}
fd = mkstemp(filename);
- if (fd < 0 || close(fd)) {
+ if (fd < 0) {
+ return -errno;
+ }
+ if (close(fd) != 0) {
+ unlink(filename);
return -errno;
}
return 0;
bdrv_delete(bs->file);
bs->file = NULL;
}
-
- bdrv_dev_change_media_cb(bs, false);
}
+ bdrv_dev_change_media_cb(bs, false);
+
/*throttling disk I/O limits*/
if (bs->io_limits_enabled) {
bdrv_io_limits_disable(bs);
}
if (s->multi)
curl_multi_cleanup(s->multi);
- if (s->url)
- free(s->url);
+ g_free(s->url);
}
static int64_t curl_getlength(BlockDriverState *bs)
vdi_index = pos / SD_DATA_OBJ_SIZE;
offset = pos % SD_DATA_OBJ_SIZE;
- data_len = MIN(remaining, SD_DATA_OBJ_SIZE);
+ data_len = MIN(remaining, SD_DATA_OBJ_SIZE - offset);
vmstate_oid = vid_to_vmstate_oid(s->inode.vdi_id, vdi_index);
}
pos += data_len;
+ data += data_len;
remaining -= data_len;
}
ret = size;
VdiHeader header;
size_t i;
size_t bmap_size;
- uint32_t *bmap;
logout("\n");
result = -errno;
}
- bmap = NULL;
if (bmap_size > 0) {
- bmap = (uint32_t *)g_malloc0(bmap_size);
- }
- for (i = 0; i < blocks; i++) {
- if (image_type == VDI_TYPE_STATIC) {
- bmap[i] = i;
- } else {
- bmap[i] = VDI_UNALLOCATED;
+ uint32_t *bmap = g_malloc0(bmap_size);
+ for (i = 0; i < blocks; i++) {
+ if (image_type == VDI_TYPE_STATIC) {
+ bmap[i] = i;
+ } else {
+ bmap[i] = VDI_UNALLOCATED;
+ }
}
+ if (write(fd, bmap, bmap_size) < 0) {
+ result = -errno;
+ }
+ g_free(bmap);
}
- if (write(fd, bmap, bmap_size) < 0) {
- result = -errno;
- }
- g_free(bmap);
+
if (image_type == VDI_TYPE_STATIC) {
if (ftruncate(fd, sizeof(header) + bmap_size + blocks * block_size)) {
result = -errno;
audio_possible_cards="ac97 es1370 sb16 cs4231a adlib gus hda"
block_drv_whitelist=""
host_cc="gcc"
-helper_cflags=""
libs_softmmu=""
libs_tools=""
audio_pt_int=""
libs_qga=""
debug_info="yes"
-target_list="DEFAULT"
+target_list=""
# Default value for a variable defining feature "foo".
# * foo="no" feature will only be used if --enable-foo arg is given
QEMU_CFLAGS="-m32 -ffixed-g2 -ffixed-g3 $QEMU_CFLAGS"
if test "$solaris" = "no" ; then
QEMU_CFLAGS="-ffixed-g1 -ffixed-g6 $QEMU_CFLAGS"
- helper_cflags="-ffixed-i0"
fi
;;
sparc64)
QEMU_CFLAGS="-m32 $QEMU_CFLAGS"
LDFLAGS="-m32 $LDFLAGS"
cc_i386='$(CC) -m32'
- helper_cflags="-fomit-frame-pointer"
host_guest_base="yes"
;;
x86_64)
exit 1
fi
-if test "$target_list" = "DEFAULT" ; then
- target_list=`echo "$default_target_list" | sed -e 's/,/ /g'`
+if test -z "$target_list" ; then
+ target_list="$default_target_list"
+else
+ target_list=`echo "$target_list" | sed -e 's/,/ /g'`
+fi
+if test -z "$target_list" ; then
+ echo "No targets enabled"
+ exit 1
fi
-
# see if system emulation was really requested
case " $target_list " in
*"-softmmu "*) softmmu=yes
LIBS=`$pkg_config --libs libseccomp`
seccomp="yes"
else
- seccomp="no"
if test "$seccomp" = "yes"; then
feature_not_found "libseccomp"
fi
+ seccomp="no"
fi
fi
##########################################
fi
+##########################################
+# Do we need libm
+cat > $TMPC << EOF
+#include <math.h>
+int main(void) { return isnan(sin(0.0)); }
+EOF
+if compile_prog "" "" ; then
+ :
+elif compile_prog "" "-lm" ; then
+ LIBS="-lm $LIBS"
+ libs_qga="-lm $libs_qga"
+else
+ echo
+ echo "Error: libm check failed"
+ echo
+ exit 1
+fi
+
##########################################
# Do we need librt
+# uClibc provides 2 versions of clock_gettime(), one with realtime
+# support and one without. This means that clock_gettime() doesn't
+# need -lrt. We still need -lrt for timer_create(), so we check for
+# that function as well.
cat > $TMPC <<EOF
#include <signal.h>
#include <time.h>
-int main(void) { return clock_gettime(CLOCK_REALTIME, NULL); }
+int main(void) {
+ timer_create(CLOCK_REALTIME, NULL, NULL);
+ return clock_gettime(CLOCK_REALTIME, NULL);
+}
EOF
if compile_prog "" "" ; then
:
-elif compile_prog "" "-lrt" ; then
+# we need pthread for static linking. use previous pthread test result
+elif compile_prog "" "-lrt $pthread_lib" ; then
LIBS="-lrt $LIBS"
+ libs_qga="-lrt $libs_qga"
fi
if test "$darwin" != "yes" -a "$mingw32" != "yes" -a "$solaris" != yes -a \
if $pkg_config --atleast-version=0.12.0 spice-protocol >/dev/null 2>&1; then
spice_qxl_io_monitors_config_async="yes"
fi
+ if $pkg_config --atleast-version=0.12.2 spice-protocol > /dev/null 2>&1; then
+ spice_qxl_client_monitors_config="yes"
+ fi
else
if test "$spice" = "yes" ; then
feature_not_found "spice"
echo "CONFIG_QXL_IO_MONITORS_CONFIG_ASYNC=y" >> $config_host_mak
fi
+if test "$spice_qxl_client_monitors_config" = "yes" ; then
+ echo "CONFIG_QXL_CLIENT_MONITORS_CONFIG=y" >> $config_host_mak
+fi
+
if test "$smartcard" = "yes" ; then
echo "CONFIG_SMARTCARD=y" >> $config_host_mak
fi
echo "HOST_CC := REAL_CC=\"\$(HOST_CC)\" cgcc" >> $config_host_mak
echo "QEMU_CFLAGS += -Wbitwise -Wno-transparent-union -Wno-old-initializer -Wno-non-pointer-null" >> $config_host_mak
fi
-echo "HELPER_CFLAGS=$helper_cflags" >> $config_host_mak
echo "LDFLAGS=$LDFLAGS" >> $config_host_mak
echo "ARLIBS_BEGIN=$arlibs_begin" >> $config_host_mak
echo "ARLIBS_END=$arlibs_end" >> $config_host_mak
symlink "$source_path/Makefile.target" "$target_dir/Makefile"
-
-case "$target_arch2" in
- alpha | i386 | or32 | s390x | sparc* | x86_64 | xtensa* | ppc*)
- echo "CONFIG_TCG_PASS_AREG0=y" >> $config_target_mak
- ;;
-esac
-
upper() {
echo "$@"| LC_ALL=C tr '[a-z]' '[A-Z]'
}
memset(&pf, 0x00, sizeof(PixelFormat));
pf.bits_per_pixel = bpp;
- pf.bytes_per_pixel = bpp / 8;
+ pf.bytes_per_pixel = DIV_ROUND_UP(bpp, 8);
pf.depth = bpp == 32 ? 24 : bpp;
switch (bpp) {
memset(&pf, 0x00, sizeof(PixelFormat));
pf.bits_per_pixel = bpp;
- pf.bytes_per_pixel = bpp / 8;
+ pf.bytes_per_pixel = DIV_ROUND_UP(bpp, 8);
pf.depth = bpp == 32 ? 24 : bpp;
switch (bpp) {
case 15:
pf.bits_per_pixel = 16;
- pf.bytes_per_pixel = 2;
pf.rmask = 0x00007c00;
pf.gmask = 0x000003E0;
pf.bmask = 0x0000001F;
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
-#ifndef CONFIG_TCG_PASS_AREG0
-#define ldub_code(p) ldub_raw(p)
-#define ldsb_code(p) ldsb_raw(p)
-#define lduw_code(p) lduw_raw(p)
-#define ldsw_code(p) ldsw_raw(p)
-#define ldl_code(p) ldl_raw(p)
-#define ldq_code(p) ldq_raw(p)
-#else
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
-#endif
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, vt) stfq_raw(p, v)
-#ifdef CONFIG_TCG_PASS_AREG0
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
-#endif
#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */
mmu_idx = cpu_mmu_index(env1);
if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
(addr & TARGET_PAGE_MASK))) {
-#ifdef CONFIG_TCG_PASS_AREG0
cpu_ldub_code(env1, addr);
-#else
- ldub_code(addr);
-#endif
}
pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
mr = iotlb_to_region(pd);
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
-#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+
#endif /* DEF_HELPER_H */
#ifndef GEN_HELPER
Returns:
- H_SUCCESS : Successully called the RTAS function (RTAS result
+ H_SUCCESS : Successfully called the RTAS function (RTAS result
will have been stored in the parameter block)
H_PARAMETER : Unknown token
xhci controller support
-----------------------
-There also is xhci host controller support available. It got alot
+There is also xhci host controller support available. It got a lot
less testing than ehci and there are a bunch of known limitations, so
ehci may work better for you. On the other hand the xhci hardware
design is much more virtualization-friendly, thus xhci emulation uses
-less ressources (especially cpu). If you wanna give xhci a try
+less resources (especially cpu). If you want to give xhci a try
use this to add the host controller ...
qemu -device nec-usb-xhci,id=xhci
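Devices can then be attached to that controller by referencing its id,
for example (assuming the id chosen above; usb-tablet is just an
illustration) ...

qemu -device usb-tablet,bus=xhci.0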
+++ /dev/null
-/*
- * dyngen defines for micro operation code
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#if !defined(__DYNGEN_EXEC_H__)
-#define __DYNGEN_EXEC_H__
-
-#if defined(CONFIG_TCG_INTERPRETER)
-/* The TCG interpreter does not need a special register AREG0,
- * but it is possible to use one by defining AREG0.
- * On i386, register edi seems to work. */
-/* Run without special register AREG0 or use a value defined elsewhere. */
-#elif defined(__i386__)
-#define AREG0 "ebp"
-#elif defined(__x86_64__)
-#define AREG0 "r14"
-#elif defined(_ARCH_PPC)
-#define AREG0 "r27"
-#elif defined(__arm__)
-#define AREG0 "r6"
-#elif defined(__hppa__)
-#define AREG0 "r17"
-#elif defined(__mips__)
-#define AREG0 "s0"
-#elif defined(__sparc__)
-#ifdef CONFIG_SOLARIS
-#define AREG0 "g2"
-#else
-#ifdef __sparc_v9__
-#define AREG0 "g5"
-#else
-#define AREG0 "g6"
-#endif
-#endif
-#elif defined(__s390__)
-#define AREG0 "r10"
-#elif defined(__alpha__)
-/* Note $15 is the frame pointer, so anything in op-i386.c that would
- require a frame pointer, like alloca, would probably loose. */
-#define AREG0 "$15"
-#elif defined(__mc68000)
-#define AREG0 "%a5"
-#elif defined(__ia64__)
-#define AREG0 "r7"
-#else
-#error unsupported CPU
-#endif
-
-#if defined(AREG0)
-register CPUArchState *env asm(AREG0);
-#else
-/* TODO: Try env = cpu_single_env. */
-extern CPUArchState *env;
-#endif
-
-#endif /* !defined(__DYNGEN_EXEC_H__) */
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
-#define MAX_OPC_PARAM_IARGS 4
+#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
-#ifndef CONFIG_TCG_PASS_AREG0
-#define env cpu_single_env
-#endif
#define DATA_SIZE 1
#include "softmmu_header.h"
#undef ACCESS_TYPE
#undef MEMSUFFIX
-#undef env
#endif
}
}
+static int memory_try_enable_merging(void *addr, size_t len)
+{
+ QemuOpts *opts;
+
+ opts = qemu_opts_find(qemu_find_opts("machine"), 0);
+ if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
+ /* disabled by the user */
+ return 0;
+ }
+
+ return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
+}
+
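+/* For illustration only: together with the "mem-merge" machine option
+ * registered further below, merging can be disabled from the command line,
+ * e.g. (hypothetical invocation, remaining options elided):
+ *
+ *   qemu -machine pc,mem-merge=off -m 1024 ...
+ */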
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr)
{
new_block->host = file_ram_alloc(new_block, size, mem_path);
if (!new_block->host) {
new_block->host = qemu_vmalloc(size);
- qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ memory_try_enable_merging(new_block->host, size);
}
#else
fprintf(stderr, "-mem-path option unsupported\n");
} else {
new_block->host = qemu_vmalloc(size);
}
- qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ memory_try_enable_merging(new_block->host, size);
}
}
new_block->length = size;
length, addr);
exit(1);
}
- qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
+ memory_try_enable_merging(vaddr, length);
qemu_ram_setup_dump(vaddr, length);
}
return;
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
+ }
qemu_put_ram_ptr(ptr);
}
len -= l;
VGACommonState *s = &c->vga;
int val, index;
+ qemu_flush_coalesced_mmio_buffer();
+
if (vga_ioport_invalid(s, addr)) {
val = 0xff;
} else {
VGACommonState *s = &c->vga;
int index;
+ qemu_flush_coalesced_mmio_buffer();
+
/* check port range access depending on color/monochrome mode */
if (vga_ioport_invalid(s, addr)) {
return;
/* I/O handler for LFB */
memory_region_init_io(&s->cirrus_linear_io, &cirrus_linear_io_ops, s,
"cirrus-linear-io", VGA_RAM_SIZE);
+ memory_region_set_flush_coalesced(&s->cirrus_linear_io);
/* I/O handler for LFB */
memory_region_init_io(&s->cirrus_linear_bitblt_io,
s,
"cirrus-bitblt-mmio",
0x400000);
+ memory_region_set_flush_coalesced(&s->cirrus_linear_bitblt_io);
/* I/O handler for memory-mapped I/O */
memory_region_init_io(&s->cirrus_mmio_io, &cirrus_mmio_io_ops, s,
"cirrus-mmio", CIRRUS_PNPMMIO_SIZE);
+ memory_region_set_flush_coalesced(&s->cirrus_mmio_io);
s->real_vram_size =
(s->device_id == CIRRUS_ID_CLGD5446) ? 4096 * 1024 : 2048 * 1024;
s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
s->mac_reg[RCTL]);
+ qemu_flush_queued_packets(&s->nic->nc);
}
static void
{
s->check_rxov = 0;
s->mac_reg[index] = val & 0xffff;
+ if (e1000_has_rxbufs(s, 1)) {
+ qemu_flush_queued_packets(&s->nic->nc);
+ }
}
static void
}
set_ru_state(s, ru_ready);
s->ru_offset = e100_read_reg4(s, SCBPointer);
+ qemu_flush_queued_packets(&s->nic->nc);
TRACE(OTHER, logout("val=0x%02x (rx start)\n", val));
break;
case RX_RESUME:
if (rfd_command & COMMAND_EL) {
/* EL bit is set, so this was the last frame. */
logout("receive: Running out of frames\n");
- set_ru_state(s, ru_suspended);
+ set_ru_state(s, ru_no_resources);
+ eepro100_rnr_interrupt(s);
}
if (rfd_command & COMMAND_S) {
/* S bit is set. */
ad->port_no = i;
ad->port.dma = &ad->dma;
ad->port.dma->ops = &ahci_dma_ops;
- ad->port_regs.cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
}
}
pr->irq_stat = 0;
pr->irq_mask = 0;
pr->scr_ctl = 0;
+ pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
ahci_reset_port(s, i);
}
}
int sense;
bool start = buf[4] & 1;
bool loej = buf[4] & 2; /* load on start, eject on !start */
+ int pwrcnd = buf[4] & 0xf0;
+
+ if (pwrcnd) {
+ /* eject/load only happens for power condition == 0 */
+ return;
+ }
if (loej) {
if (!start && !s->tray_open && s->tray_locked) {
{ 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
/* airflow-temperature-celsius */
{ 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
- /* end of list */
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
};
static int ide_handle_rw_error(IDEState *s, int error, int op);
case SMART_READ_THRESH:
memset(s->io_buffer, 0, 0x200);
s->io_buffer[0] = 0x01; /* smart struct version */
- for (n=0; n<30; n++) {
- if (smart_attributes[n][0] == 0)
- break;
+ for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
s->io_buffer[2+0+(n*12)] = smart_attributes[n][0];
s->io_buffer[2+1+(n*12)] = smart_attributes[n][11];
}
case SMART_READ_DATA:
memset(s->io_buffer, 0, 0x200);
s->io_buffer[0] = 0x01; /* smart struct version */
- for (n=0; n<30; n++) {
- if (smart_attributes[n][0] == 0) {
- break;
- }
+ for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
int i;
for(i = 0; i < 11; i++) {
s->io_buffer[2+i+(n*12)] = smart_attributes[n][i];
*
* Copyright (c) 2008 OKL
* Copyright (c) 2011 NICTA Pty Ltd
- * Originally Written by Hans Jiang
+ * Originally written by Hans Jiang
*
- * This code is licenced under the GPL version 2 or later. See
+ * This code is licensed under the GPL version 2 or later. See
* the COPYING file in the top-level directory.
*
* TODO: implement vectors.
*
* Copyright (c) 2008 OK Labs
* Copyright (c) 2011 NICTA Pty Ltd
- * Originally Written by Hans Jiang
+ * Originally written by Hans Jiang
* Updated by Peter Chubb
*
- * This code is licenced under GPL version 2 or later. See
+ * This code is licensed under GPL version 2 or later. See
* the COPYING file in the top-level directory.
*
*/
* Written by Hans at OK-Labs
* Updated by Peter Chubb.
*
- * This code is licenced under the GPL, version 2 or later.
+ * This code is licensed under the GPL, version 2 or later.
* See the file `COPYING' in the top level directory.
*
* It (partially) emulates a Kyoto Microcomputer
#include "bitmap.h"
#include "vga-pci.h"
-/* output Bochs bios info messages */
-//#define DEBUG_BIOS
-
/* debug PC/ISA interrupts */
//#define DEBUG_IRQ
static int shutdown_index = 0;
switch(addr) {
- /* Bochs BIOS messages */
- case 0x400:
- case 0x401:
- /* used to be panic, now unused */
- break;
- case 0x402:
- case 0x403:
-#ifdef DEBUG_BIOS
- fprintf(stderr, "%c", val);
-#endif
- break;
case 0x8900:
/* same as Bochs power off */
if (val == shutdown_str[shutdown_index]) {
}
break;
- /* LGPL'ed VGA BIOS messages */
case 0x501:
case 0x502:
exit((val << 1) | 1);
- case 0x500:
- case 0x503:
-#ifdef DEBUG_BIOS
- fprintf(stderr, "%c", val);
-#endif
- break;
}
}
uint64_t *numa_fw_cfg;
int i, j;
- register_ioport_write(0x400, 1, 2, bochs_bios_write, NULL);
- register_ioport_write(0x401, 1, 2, bochs_bios_write, NULL);
- register_ioport_write(0x402, 1, 1, bochs_bios_write, NULL);
- register_ioport_write(0x403, 1, 1, bochs_bios_write, NULL);
register_ioport_write(0x8900, 1, 1, bochs_bios_write, NULL);
register_ioport_write(0x501, 1, 1, bochs_bios_write, NULL);
register_ioport_write(0x501, 1, 2, bochs_bios_write, NULL);
register_ioport_write(0x502, 1, 2, bochs_bios_write, NULL);
- register_ioport_write(0x500, 1, 1, bochs_bios_write, NULL);
- register_ioport_write(0x503, 1, 1, bochs_bios_write, NULL);
fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0);
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include <zlib.h>
+
#include "qemu-common.h"
#include "qemu-timer.h"
#include "qemu-queue.h"
void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
{
+ trace_qxl_set_guest_bug(qxl->id);
qxl_send_events(qxl, QXL_INTERRUPT_ERROR);
qxl->guest_bug = 1;
if (qxl->guestdebug) {
spice_qxl_destroy_surface_async(&qxl->ssd.qxl, id, (uintptr_t)cookie);
} else {
qxl->ssd.worker->destroy_surface_wait(qxl->ssd.worker, id);
+ qxl_spice_destroy_surface_wait_complete(qxl, id);
}
}
case QXL_MODE_VGA:
ret = false;
qemu_mutex_lock(&qxl->ssd.lock);
- if (qxl->ssd.update != NULL) {
- update = qxl->ssd.update;
- qxl->ssd.update = NULL;
+ update = QTAILQ_FIRST(&qxl->ssd.updates);
+ if (update != NULL) {
+ QTAILQ_REMOVE(&qxl->ssd.updates, update, next);
*ext = update->ext;
ret = true;
}
{
PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl);
+ if (runstate_check(RUN_STATE_INMIGRATE) ||
+ runstate_check(RUN_STATE_POSTMIGRATE)) {
+ return;
+ }
+
qxl->shadow_rom.client_present = client_present;
memcpy(qxl->shadow_rom.client_capabilities, caps, sizeof(caps));
qxl->rom->client_present = client_present;
#endif
+#if defined(CONFIG_QXL_CLIENT_MONITORS_CONFIG) \
+ && SPICE_SERVER_VERSION >= 0x000b05
+
+static uint32_t qxl_crc32(const uint8_t *p, unsigned len)
+{
+ /*
+ * zlib xors the seed with 0xffffffff and xors the result again
+ * with 0xffffffff; neither is done by Linux's crc32, which we want
+ * to be compatible with, so undo both here.
+ */
+ return crc32(0xffffffff, p, len) ^ 0xffffffff;
+}
+
+/* called from main context only */
+static int interface_client_monitors_config(QXLInstance *sin,
+ VDAgentMonitorsConfig *monitors_config)
+{
+ PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl);
+ QXLRom *rom = memory_region_get_ram_ptr(&qxl->rom_bar);
+ int i;
+
+ /*
+ * Older Windows drivers set int_mask to 0 when their ISR is called,
+ * then later set it to ~0. So it doesn't relate to the actual interrupts
+ * handled. However, they are old, so clearly they don't support this
+ * interrupt.
+ */
+ if (qxl->ram->int_mask == 0 || qxl->ram->int_mask == ~0 ||
+ !(qxl->ram->int_mask & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)) {
+ trace_qxl_client_monitors_config_unsupported_by_guest(qxl->id,
+ qxl->ram->int_mask,
+ monitors_config);
+ return 0;
+ }
+ if (!monitors_config) {
+ return 1;
+ }
+ memset(&rom->client_monitors_config, 0,
+ sizeof(rom->client_monitors_config));
+ rom->client_monitors_config.count = monitors_config->num_of_monitors;
+ /* monitors_config->flags ignored */
+ if (rom->client_monitors_config.count >=
+ ARRAY_SIZE(rom->client_monitors_config.heads)) {
+ trace_qxl_client_monitors_config_capped(qxl->id,
+ monitors_config->num_of_monitors,
+ ARRAY_SIZE(rom->client_monitors_config.heads));
+ rom->client_monitors_config.count =
+ ARRAY_SIZE(rom->client_monitors_config.heads);
+ }
+ for (i = 0 ; i < rom->client_monitors_config.count ; ++i) {
+ VDAgentMonConfig *monitor = &monitors_config->monitors[i];
+ QXLURect *rect = &rom->client_monitors_config.heads[i];
+ /* monitor->depth ignored */
+ rect->left = monitor->x;
+ rect->top = monitor->y;
+ rect->right = monitor->x + monitor->width;
+ rect->bottom = monitor->y + monitor->height;
+ }
+ rom->client_monitors_config_crc = qxl_crc32(
+ (const uint8_t *)&rom->client_monitors_config,
+ sizeof(rom->client_monitors_config));
+ trace_qxl_client_monitors_config_crc(qxl->id,
+ sizeof(rom->client_monitors_config),
+ rom->client_monitors_config_crc);
+
+ trace_qxl_interrupt_client_monitors_config(qxl->id,
+ rom->client_monitors_config.count,
+ rom->client_monitors_config.heads);
+ qxl_send_events(qxl, QXL_INTERRUPT_CLIENT_MONITORS_CONFIG);
+ return 1;
+}
+#endif
+
static const QXLInterface qxl_interface = {
.base.type = SPICE_INTERFACE_QXL,
.base.description = "qxl gpu",
#if SPICE_SERVER_VERSION >= 0x000b04
.set_client_capabilities = interface_set_client_capabilities,
#endif
+#if SPICE_SERVER_VERSION >= 0x000b05 && \
+ defined(CONFIG_QXL_CLIENT_MONITORS_CONFIG)
+ .client_monitors_config = interface_client_monitors_config,
+#endif
};
static void qxl_enter_vga_mode(PCIQXLDevice *d)
break;
}
trace_qxl_io_unexpected_vga_mode(d->id,
- io_port, io_port_to_string(io_port));
+ addr, val, io_port_to_string(io_port));
/* be nice to buggy guest drivers */
if (io_port >= QXL_IO_UPDATE_AREA_ASYNC &&
io_port < QXL_IO_RANGE_SIZE) {
return;
}
+ if (update.left < 0 || update.top < 0 || update.left >= update.right ||
+ update.top >= update.bottom) {
+ qxl_set_guest_bug(d, "QXL_IO_UPDATE_AREA: "
+ "invalid area(%d,%d,%d,%d)\n", update.left,
+ update.right, update.top, update.bottom);
+ break;
+ }
if (async == QXL_ASYNC) {
cookie = qxl_cookie_new(QXL_COOKIE_TYPE_IO,
QXL_IO_UPDATE_AREA_ASYNC);
qxl_set_mode(d, val, 0);
break;
case QXL_IO_LOG:
+ trace_qxl_io_log(d->id, d->ram->log_buf);
if (d->guestdebug) {
fprintf(stderr, "qxl/guest-%d: %" PRId64 ": %s", d->id,
qemu_get_clock_ns(vm_clock), d->ram->log_buf);
static uint64_t ioport_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
- PCIQXLDevice *d = opaque;
+ PCIQXLDevice *qxl = opaque;
- trace_qxl_io_read_unexpected(d->id);
+ trace_qxl_io_read_unexpected(qxl->id);
return 0xff;
}
uint32_t old_pending;
uint32_t le_events = cpu_to_le32(events);
+ trace_qxl_send_events(d->id, events);
assert(qemu_spice_display_is_running(&d->ssd));
old_pending = __sync_fetch_and_or(&d->ram->int_pending, le_events);
if ((old_pending & le_events) == le_events) {
if (qxl->id == 0) {
vga_dirty_log_start(&qxl->vga);
}
+ memory_region_set_flush_coalesced(&qxl->io_bar);
pci_register_bar(&qxl->pci, QXL_IO_RANGE_INDEX,
#include "sysemu.h"
#include "cpu.h"
-#include "dyngen-exec.h"
#include "qemu-char.h"
#include "sysemu.h"
#include "qemu-char.h"
uint8_t reserved1[6];
uint64_t tag;
uint8_t reserved2[4];
- uint64_t lun QEMU_PACKED;
+ uint64_t lun;
uint8_t reserved3[2];
uint8_t tsk_mgmt_func;
uint8_t reserved4;
uint64_t task_tag;
uint8_t reserved5[8];
-};
+} QEMU_PACKED;
/*
* We need the packed attribute because the SRP spec only aligns the
uint8_t data_in_desc_cnt;
uint64_t tag;
uint8_t reserved2[4];
- uint64_t lun QEMU_PACKED;
+ uint64_t lun;
uint8_t reserved3;
uint8_t task_attr;
uint8_t reserved4;
uint8_t add_cdb_len;
uint8_t cdb[16];
uint8_t add_data[0];
-};
+} QEMU_PACKED;
enum {
SRP_RSP_FLAG_RSPVALID = 1 << 0,
return 0;
}
+/* Prepare to receive the next packet */
+static void usb_net_reset_in_buf(USBNetState *s)
+{
+ s->in_ptr = s->in_len = 0;
+ qemu_flush_queued_packets(&s->nic->nc);
+}
+
static int rndis_parse(USBNetState *s, uint8_t *data, int length)
{
uint32_t msg_type;
case RNDIS_RESET_MSG:
rndis_clear_responsequeue(s);
- s->out_ptr = s->in_ptr = s->in_len = 0;
+ s->out_ptr = 0;
+ usb_net_reset_in_buf(s);
return rndis_reset_response(s, (rndis_reset_msg_type *) data);
case RNDIS_KEEPALIVE_MSG:
int ret = USB_RET_NAK;
if (s->in_ptr > s->in_len) {
- s->in_ptr = s->in_len = 0;
+ usb_net_reset_in_buf(s);
ret = USB_RET_NAK;
return ret;
}
if (s->in_ptr >= s->in_len &&
(is_rndis(s) || (s->in_len & (64 - 1)) || !ret)) {
/* no short packet necessary */
- s->in_ptr = s->in_len = 0;
+ usb_net_reset_in_buf(s);
}
#ifdef TRAFFIC_DEBUG
static ssize_t usbnet_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
USBNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
- struct rndis_packet_msg_type *msg;
+ uint8_t *in_buf = s->in_buf;
+ size_t total_size = size;
if (is_rndis(s)) {
- msg = (struct rndis_packet_msg_type *) s->in_buf;
if (s->rndis_state != RNDIS_DATA_INITIALIZED) {
return -1;
}
- if (size + sizeof(struct rndis_packet_msg_type) > sizeof(s->in_buf))
- return -1;
+ total_size += sizeof(struct rndis_packet_msg_type);
+ }
+ if (total_size > sizeof(s->in_buf)) {
+ return -1;
+ }
+ /* Only accept packet if input buffer is empty */
+ if (s->in_len > 0) {
+ return 0;
+ }
+
+ if (is_rndis(s)) {
+ struct rndis_packet_msg_type *msg;
+
+ msg = (struct rndis_packet_msg_type *)in_buf;
memset(msg, 0, sizeof(struct rndis_packet_msg_type));
msg->MessageType = cpu_to_le32(RNDIS_PACKET_MSG);
- msg->MessageLength = cpu_to_le32(size + sizeof(struct rndis_packet_msg_type));
- msg->DataOffset = cpu_to_le32(sizeof(struct rndis_packet_msg_type) - 8);
+ msg->MessageLength = cpu_to_le32(size + sizeof(*msg));
+ msg->DataOffset = cpu_to_le32(sizeof(*msg) - 8);
msg->DataLength = cpu_to_le32(size);
/* msg->OOBDataOffset;
* msg->OOBDataLength;
* msg->VcHandle;
* msg->Reserved;
*/
- memcpy(msg + 1, buf, size);
- s->in_len = size + sizeof(struct rndis_packet_msg_type);
- } else {
- if (size > sizeof(s->in_buf))
- return -1;
- memcpy(s->in_buf, buf, size);
- s->in_len = size;
+ in_buf += sizeof(*msg);
}
+
+ memcpy(in_buf, buf, size);
+ s->in_len = total_size;
s->in_ptr = 0;
return size;
}
s_ioport_ctrl = g_malloc(sizeof(*s_ioport_ctrl));
memory_region_init_io(s_ioport_ctrl, &vga_mm_ctrl_ops, s,
"vga-mm-ctrl", 0x100000);
+ memory_region_set_flush_coalesced(s_ioport_ctrl);
vga_io_memory = g_malloc(sizeof(*vga_io_memory));
/* XXX: endianness? */
VGACommonState *s = opaque;
int val, index;
+ qemu_flush_coalesced_mmio_buffer();
+
if (vga_ioport_invalid(s, addr)) {
val = 0xff;
} else {
VGACommonState *s = opaque;
int index;
+ qemu_flush_coalesced_mmio_buffer();
+
/* check port range access depending on color/monochrome mode */
if (vga_ioport_invalid(s, addr)) {
return;
vga_mem = g_malloc(sizeof(*vga_mem));
memory_region_init_io(vga_mem, &vga_mem_ops, s,
"vga-lowmem", 0x20000);
+ memory_region_set_flush_coalesced(vga_mem);
return vga_mem;
}
VirtIONet *n = to_virtio_net(vdev);
qemu_flush_queued_packets(&n->nic->nc);
-
- /* We now have RX buffers, signal to the IO thread to break out of the
- * select to re-poll the tap file descriptor */
- qemu_notify_event();
}
static int virtio_net_can_receive(NetClientState *nc)
memory_region_init_io(&s->io_bar, &vmsvga_io_ops, &s->chip,
"vmsvga-io", 0x10);
+ memory_region_set_flush_coalesced(&s->io_bar);
pci_register_bar(&s->card, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar);
vmsvga_init(&s->chip, pci_address_space(dev),
}
-/* This size should be enough to read the first 7 lines of a ressource file */
-#define XEN_HOST_PCI_RESSOURCE_BUFFER_SIZE 400
+/* This size should be enough to read the first 7 lines of a resource file */
+#define XEN_HOST_PCI_RESOURCE_BUFFER_SIZE 400
static int xen_host_pci_get_resource(XenHostPCIDevice *d)
{
int i, rc, fd;
char path[PATH_MAX];
- char buf[XEN_HOST_PCI_RESSOURCE_BUFFER_SIZE];
+ char buf[XEN_HOST_PCI_RESOURCE_BUFFER_SIZE];
unsigned long long start, end, flags, size;
char *endptr, *s;
uint8_t type;
{
struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
net_tx_packets(netdev);
+ qemu_flush_queued_packets(&netdev->nic->nc);
}
static int net_free(struct XenDevice *xendev)
* - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
*/
-/* emulated register infomation */
+/* emulated register information */
struct XenPTRegInfo {
uint32_t offset;
uint32_t size;
(XenPCIPassthroughState *, const XenPTRegGroupInfo *,
uint32_t base_offset, uint8_t *size);
-/* emulated register group infomation */
+/* emulated register group information */
struct XenPTRegGroupInfo {
uint8_t grp_id;
XenPTRegisterGroupType grp_type;
return 0;
}
-/* Header Type0 reg static infomation table */
+/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
/* Vendor ID reg */
{
* Vital Product Data Capability
*/
-/* Vital Product Data Capability Structure reg static infomation table */
+/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
{
.offset = PCI_CAP_LIST_NEXT,
* Vendor Specific Capability
*/
-/* Vendor Specific Capability Structure reg static infomation table */
+/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
{
.offset = PCI_CAP_LIST_NEXT,
return 0;
}
-/* PCI Express Capability Structure reg static infomation table */
+/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
/* Next Pointer reg */
{
return 0;
}
-/* Power Management Capability reg static infomation table */
+/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
/* Next Pointer reg */
{
return 0;
}
-/* MSI Capability Structure reg static infomation table */
+/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
/* Next Pointer reg */
{
return 0;
}
-/* MSI-X Capability Structure reg static infomation table */
+/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
/* Next Pointer reg */
{
{
IOHandlerRecord *ioh;
+ assert(fd >= 0);
+
if (!fd_read && !fd_write) {
QLIST_FOREACH(ioh, &io_handlers, next) {
if (ioh->fd == fd) {
/* The man page (and posix) say ioctl numbers are signed int, but
* they're not. Linux, glibc and *BSD all treat ioctl numbers as
* unsigned, and treating them as signed here can break things */
- unsigned irqchip_inject_ioctl;
+ unsigned irq_set_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
event.level = level;
event.irq = irq;
- ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
+ ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
if (ret < 0) {
perror("kvm_set_irq");
abort();
}
- return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
+ return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
#ifdef KVM_CAP_IRQ_ROUTING
return ret;
}
- s->irqchip_inject_ioctl = KVM_IRQ_LINE;
- if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
- s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
- }
kvm_kernel_irqchip = true;
/* If we have an in-kernel IRQ chip then we must have asynchronous
* interrupt delivery (though the reverse is not necessarily true)
s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
+ s->irq_set_ioctl = KVM_IRQ_LINE;
+ if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
+ s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
+ }
+
ret = kvm_arch_init(s);
if (ret < 0) {
goto err;
return 0;
err:
- if (s) {
- if (s->vmfd >= 0) {
- close(s->vmfd);
- }
- if (s->fd != -1) {
- close(s->fd);
- }
+ if (s->vmfd >= 0) {
+ close(s->vmfd);
+ }
+ if (s->fd != -1) {
+ close(s->fd);
}
g_free(s);
qemu_mutex_lock_iothread();
kvm_arch_post_run(env, run);
- kvm_flush_coalesced_mmio_buffer();
-
if (run_ret < 0) {
if (run_ret == -EINTR || run_ret == -EAGAIN) {
DPRINTF("io window exit\n");
}
#endif
-/* timers for rdtsc */
-
-#if 0
-
-static uint64_t emu_time;
-
-int64_t cpu_get_real_ticks(void)
-{
- return emu_time++;
-}
-
-#endif
-
#if defined(CONFIG_USE_NPTL)
/***********************************************************/
/* Helper routines for implementing atomic operations. */
#include "exec-obsolete.h"
unsigned memory_region_transaction_depth = 0;
-static bool memory_region_update_pending = false;
static bool global_dirty_log = false;
static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
MemoryRegion *mr = opaque;
uint64_t tmp;
+ if (mr->flush_coalesced_mmio) {
+ qemu_flush_coalesced_mmio_buffer();
+ }
tmp = mr->ops->read(mr->opaque, addr, size);
*value |= (tmp & mask) << shift;
}
MemoryRegion *mr = opaque;
uint64_t tmp;
+ if (mr->flush_coalesced_mmio) {
+ qemu_flush_coalesced_mmio_buffer();
+ }
tmp = (*value >> shift) & mask;
mr->ops->write(mr->opaque, addr, tmp, size);
}
address_space_update_ioeventfds(as);
}
-static void memory_region_update_topology(MemoryRegion *mr)
-{
- if (memory_region_transaction_depth) {
- memory_region_update_pending |= !mr || mr->enabled;
- return;
- }
-
- if (mr && !mr->enabled) {
- return;
- }
-
- MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
-
- if (address_space_memory.root) {
- address_space_update_topology(&address_space_memory);
- }
- if (address_space_io.root) {
- address_space_update_topology(&address_space_io);
- }
-
- MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
-
- memory_region_update_pending = false;
-}
-
void memory_region_transaction_begin(void)
{
+ qemu_flush_coalesced_mmio_buffer();
++memory_region_transaction_depth;
}
{
assert(memory_region_transaction_depth);
--memory_region_transaction_depth;
- if (!memory_region_transaction_depth && memory_region_update_pending) {
- memory_region_update_topology(NULL);
+ if (!memory_region_transaction_depth) {
+ MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
+
+ if (address_space_memory.root) {
+ address_space_update_topology(&address_space_memory);
+ }
+ if (address_space_io.root) {
+ address_space_update_topology(&address_space_io);
+ }
+
+ MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
}
}
mr->dirty_log_mask = 0;
mr->ioeventfd_nb = 0;
mr->ioeventfds = NULL;
+ mr->flush_coalesced_mmio = false;
}
static bool memory_region_access_valid(MemoryRegion *mr,
{
uint8_t mask = 1 << client;
+ memory_region_transaction_begin();
mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
if (mr->readonly != readonly) {
+ memory_region_transaction_begin();
mr->readonly = readonly;
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
}
void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
if (mr->readable != readable) {
+ memory_region_transaction_begin();
mr->readable = readable;
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
}
cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
memory_region_update_coalesced_range(mr);
+ memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
CoalescedMemoryRange *cmr;
+ qemu_flush_coalesced_mmio_buffer();
+ mr->flush_coalesced_mmio = false;
+
while (!QTAILQ_EMPTY(&mr->coalesced)) {
cmr = QTAILQ_FIRST(&mr->coalesced);
QTAILQ_REMOVE(&mr->coalesced, cmr, link);
memory_region_update_coalesced_range(mr);
}
+void memory_region_set_flush_coalesced(MemoryRegion *mr)
+{
+ mr->flush_coalesced_mmio = true;
+}
+
+void memory_region_clear_flush_coalesced(MemoryRegion *mr)
+{
+ qemu_flush_coalesced_mmio_buffer();
+ if (QTAILQ_EMPTY(&mr->coalesced)) {
+ mr->flush_coalesced_mmio = false;
+ }
+}
+
void memory_region_add_eventfd(MemoryRegion *mr,
target_phys_addr_t addr,
unsigned size,
};
unsigned i;
+ memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
break;
memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
mr->ioeventfds[i] = mrfd;
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
void memory_region_del_eventfd(MemoryRegion *mr,
};
unsigned i;
+ memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
break;
--mr->ioeventfd_nb;
mr->ioeventfds = g_realloc(mr->ioeventfds,
sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
{
MemoryRegion *other;
+ memory_region_transaction_begin();
+
assert(!subregion->parent);
subregion->parent = mr;
subregion->addr = offset;
}
QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
void memory_region_del_subregion(MemoryRegion *mr,
MemoryRegion *subregion)
{
+ memory_region_transaction_begin();
assert(subregion->parent == mr);
subregion->parent = NULL;
QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
- memory_region_update_topology(mr);
+ memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
if (enabled == mr->enabled) {
return;
}
+ memory_region_transaction_begin();
mr->enabled = enabled;
- memory_region_update_topology(NULL);
+ memory_region_transaction_commit();
}
void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
- target_phys_addr_t old_offset = mr->alias_offset;
-
assert(mr->alias);
- mr->alias_offset = offset;
- if (offset == old_offset || !mr->parent) {
+ if (offset == mr->alias_offset) {
return;
}
- memory_region_update_topology(mr);
+ memory_region_transaction_begin();
+ mr->alias_offset = offset;
+ memory_region_transaction_commit();
}
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
void set_system_memory_map(MemoryRegion *mr)
{
+ memory_region_transaction_begin();
address_space_memory.root = mr;
- memory_region_update_topology(NULL);
+ memory_region_transaction_commit();
}
void set_system_io_map(MemoryRegion *mr)
{
+ memory_region_transaction_begin();
address_space_io.root = mr;
- memory_region_update_topology(NULL);
+ memory_region_transaction_commit();
}
uint64_t io_mem_read(MemoryRegion *mr, target_phys_addr_t addr, unsigned size)
bool enabled;
bool rom_device;
bool warning_printed; /* For reservations */
+ bool flush_coalesced_mmio;
MemoryRegion *alias;
target_phys_addr_t alias_offset;
unsigned priority;
uint64_t size);
/**
- * memory_region_init_ram: Initialize RAM memory region from a user-provided.
- * pointer. Accesses into the region will modify
- * memory directly.
+ * memory_region_init_ram_ptr: Initialize RAM memory region from a
+ * user-provided pointer. Accesses into the
+ * region will modify memory directly.
*
* @mr: the #MemoryRegion to be initialized.
* @name: the name of the region.
*/
void memory_region_clear_coalescing(MemoryRegion *mr);
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ * accesses.
+ *
+ * Ensure that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ * accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
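As a minimal usage sketch (mirroring the cirrus/vga hunks above; the state
struct and ops names are illustrative), a device that needs ordered MMIO
tags its region right after initialization:

    memory_region_init_io(&s->mmio, &mmio_ops, s, "dev-mmio", 0x1000);
    memory_region_set_flush_coalesced(&s->mmio);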
/**
* memory_region_add_eventfd: Request an eventfd to be triggered when a word
* is written to a location.
target_phys_addr_t offset,
MemoryRegion *subregion);
/**
- * memory_region_add_subregion: Add a subregion to a container, with overlap.
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ * with overlap.
*
* Adds a subregion at @offset. The subregion may overlap with other
* subregions. Conflicts are resolved by having a higher @priority hide a
void memory_global_dirty_log_start(void);
/**
- * memory_global_dirty_log_stop: begin dirty logging for all regions
+ * memory_global_dirty_log_stop: end dirty logging for all regions
*/
void memory_global_dirty_log_stop(void);
{
nc->receive_disabled = 0;
- qemu_net_queue_flush(nc->send_queue);
+ if (qemu_net_queue_flush(nc->send_queue)) {
+ /* We emptied the queue successfully, signal to the IO thread to repoll
+ * the file descriptor (for tap, for example).
+ */
+ qemu_notify_event();
+ }
}
static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
void *opaque)
{
NetClientState *nc = opaque;
+ int ret;
if (nc->link_down) {
return iov_size(iov, iovcnt);
}
+ if (nc->receive_disabled) {
+ return 0;
+ }
+
if (nc->info->receive_iov) {
- return nc->info->receive_iov(nc, iov, iovcnt);
+ ret = nc->info->receive_iov(nc, iov, iovcnt);
} else {
- return nc_sendv_compat(nc, iov, iovcnt);
+ ret = nc_sendv_compat(nc, iov, iovcnt);
}
+
+ if (ret == 0) {
+ nc->receive_disabled = 1;
+ }
+
+ return ret;
}
ssize_t qemu_sendv_packet_async(NetClientState *sender,
continue;
}
- if (!qemu_can_send_packet(&port->nc)) {
- return 0;
+ if (qemu_can_send_packet(&port->nc)) {
+ return 1;
}
}
- return 1;
+ return 0;
}
static ssize_t net_hub_port_receive(NetClientState *nc,
g_free(queue);
}
-static ssize_t qemu_net_queue_append(NetQueue *queue,
- NetClientState *sender,
- unsigned flags,
- const uint8_t *buf,
- size_t size,
- NetPacketSent *sent_cb)
+static void qemu_net_queue_append(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const uint8_t *buf,
+ size_t size,
+ NetPacketSent *sent_cb)
{
NetPacket *packet;
memcpy(packet->data, buf, size);
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
-
- return size;
}
-static ssize_t qemu_net_queue_append_iov(NetQueue *queue,
- NetClientState *sender,
- unsigned flags,
- const struct iovec *iov,
- int iovcnt,
- NetPacketSent *sent_cb)
+static void qemu_net_queue_append_iov(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ NetPacketSent *sent_cb)
{
NetPacket *packet;
size_t max_len = 0;
}
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
-
- return packet->size;
}
static ssize_t qemu_net_queue_deliver(NetQueue *queue,
ssize_t ret;
if (queue->delivering || !qemu_can_send_packet(sender)) {
- return qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
+ qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
+ return 0;
}
ret = qemu_net_queue_deliver(queue, sender, flags, data, size);
ssize_t ret;
if (queue->delivering || !qemu_can_send_packet(sender)) {
- return qemu_net_queue_append_iov(queue, sender, flags,
- iov, iovcnt, sent_cb);
+ qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
+ return 0;
}
ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);
}
}
-void qemu_net_queue_flush(NetQueue *queue)
+bool qemu_net_queue_flush(NetQueue *queue)
{
while (!QTAILQ_EMPTY(&queue->packets)) {
NetPacket *packet;
packet->size);
if (ret == 0) {
QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
- break;
+ return false;
}
if (packet->sent_cb) {
g_free(packet);
}
+ return true;
}
NetPacketSent *sent_cb);
void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
-void qemu_net_queue_flush(NetQueue *queue);
+bool qemu_net_queue_flush(NetQueue *queue);
#endif /* QEMU_NET_QUEUE_H */
#include "qemu-error.h"
#include "qemu-option.h"
#include "qemu_socket.h"
+#include "iov.h"
typedef struct NetSocketState {
NetClientState nc;
int state; /* 0 = getting length, 1 = getting data */
unsigned int index;
unsigned int packet_len;
+ unsigned int send_index; /* number of bytes sent (only SOCK_STREAM) */
uint8_t buf[4096];
struct sockaddr_in dgram_dst; /* contains inet host and port destination iff connectionless (SOCK_DGRAM) */
+ IOHandler *send_fn; /* differs between SOCK_STREAM/SOCK_DGRAM */
+ bool read_poll; /* waiting to receive data? */
+ bool write_poll; /* waiting to transmit data? */
} NetSocketState;
static void net_socket_accept(void *opaque);
+static void net_socket_writable(void *opaque);
+
+/* Only read packets from socket when peer can receive them */
+static int net_socket_can_send(void *opaque)
+{
+ NetSocketState *s = opaque;
+
+ return qemu_can_send_packet(&s->nc);
+}
+
+static void net_socket_update_fd_handler(NetSocketState *s)
+{
+ qemu_set_fd_handler2(s->fd,
+ s->read_poll ? net_socket_can_send : NULL,
+ s->read_poll ? s->send_fn : NULL,
+ s->write_poll ? net_socket_writable : NULL,
+ s);
+}
+
+static void net_socket_read_poll(NetSocketState *s, bool enable)
+{
+ s->read_poll = enable;
+ net_socket_update_fd_handler(s);
+}
+
+static void net_socket_write_poll(NetSocketState *s, bool enable)
+{
+ s->write_poll = enable;
+ net_socket_update_fd_handler(s);
+}
+
+static void net_socket_writable(void *opaque)
+{
+ NetSocketState *s = opaque;
+
+ net_socket_write_poll(s, false);
+
+ qemu_flush_queued_packets(&s->nc);
+}
-/* XXX: we consider we can send the whole packet without blocking */
static ssize_t net_socket_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
- uint32_t len;
- len = htonl(size);
-
- send_all(s->fd, (const uint8_t *)&len, sizeof(len));
- return send_all(s->fd, buf, size);
+ uint32_t len = htonl(size);
+ struct iovec iov[] = {
+ {
+ .iov_base = &len,
+ .iov_len = sizeof(len),
+ }, {
+ .iov_base = (void *)buf,
+ .iov_len = size,
+ },
+ };
+ size_t remaining;
+ ssize_t ret;
+
+ remaining = iov_size(iov, 2) - s->send_index;
+ ret = iov_send(s->fd, iov, 2, s->send_index, remaining);
+
+ if (ret == -1 && errno == EAGAIN) {
+ ret = 0; /* handled further down */
+ }
+ if (ret == -1) {
+ s->send_index = 0;
+ return -errno;
+ }
+ if (ret < (ssize_t)remaining) {
+ s->send_index += ret;
+ net_socket_write_poll(s, true);
+ return 0;
+ }
+ s->send_index = 0;
+ return size;
}
static ssize_t net_socket_receive_dgram(NetClientState *nc, const uint8_t *buf, size_t size)
{
NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
+ ssize_t ret;
+
+ do {
+ ret = sendto(s->fd, buf, size, 0,
+ (struct sockaddr *)&s->dgram_dst,
+ sizeof(s->dgram_dst));
+ } while (ret == -1 && errno == EINTR);
- return sendto(s->fd, (const void *)buf, size, 0,
- (struct sockaddr *)&s->dgram_dst, sizeof(s->dgram_dst));
+ if (ret == -1 && errno == EAGAIN) {
+ net_socket_write_poll(s, true);
+ return 0;
+ }
+ return ret;
}
static void net_socket_send(void *opaque)
} else if (size == 0) {
/* end of connection */
eoc:
- qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+ net_socket_read_poll(s, false);
+ net_socket_write_poll(s, false);
if (s->listen_fd != -1) {
qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
}
return;
if (size == 0) {
/* end of connection */
- qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+ net_socket_read_poll(s, false);
+ net_socket_write_poll(s, false);
return;
}
qemu_send_packet(&s->nc, s->buf, size);
{
NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
if (s->fd != -1) {
- qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+ net_socket_read_poll(s, false);
+ net_socket_write_poll(s, false);
close(s->fd);
s->fd = -1;
}
s->fd = fd;
s->listen_fd = -1;
-
- qemu_set_fd_handler(s->fd, net_socket_send_dgram, NULL, s);
+ s->send_fn = net_socket_send_dgram;
+ net_socket_read_poll(s, true);
/* mcast: save bound address as dst */
if (is_connected) {
static void net_socket_connect(void *opaque)
{
NetSocketState *s = opaque;
- qemu_set_fd_handler(s->fd, net_socket_send, NULL, s);
+ s->send_fn = net_socket_send;
+ net_socket_read_poll(s, true);
}
static NetClientInfo net_socket_info = {
##
# @GuestFsFreezeStatus
#
-# An enumation of filesystem freeze states
+# An enumeration of filesystem freeze states
#
# @thawed: filesystems thawed/unfrozen
#
##
# @RunState
#
-# An enumation of VM run states.
+# An enumeration of VM run states.
#
# @debug: QEMU is running on a debugger
#
'postmigrate', 'prelaunch', 'finish-migrate', 'restore-vm',
'running', 'save-vm', 'shutdown', 'suspended', 'watchdog' ] }
+##
+# @SnapshotInfo
+#
+# @id: unique snapshot id
+#
+# @name: user chosen name
+#
+# @vm-state-size: size of the VM state
+#
+# @date-sec: UTC date of the snapshot in seconds
+#
+# @date-nsec: fractional part in nanoseconds to be used with date-sec
+#
+# @vm-clock-sec: VM clock relative to boot in seconds
+#
+# @vm-clock-nsec: fractional part in nanoseconds to be used with vm-clock-sec
+#
+# Since: 1.3
+#
+##
+
+{ 'type': 'SnapshotInfo',
+ 'data': { 'id': 'str', 'name': 'str', 'vm-state-size': 'int',
+ 'date-sec': 'int', 'date-nsec': 'int',
+ 'vm-clock-sec': 'int', 'vm-clock-nsec': 'int' } }
+
+##
+# @ImageInfo:
+#
+# Information about a QEMU image file
+#
+# @filename: name of the image file
+#
+# @format: format of the image file
+#
+# @virtual-size: maximum capacity in bytes of the image
+#
+# @actual-size: #optional actual size on disk in bytes of the image
+#
+# @dirty-flag: #optional true if image is not cleanly closed
+#
+# @cluster-size: #optional size of a cluster in bytes
+#
+# @encrypted: #optional true if the image is encrypted
+#
+# @backing-filename: #optional name of the backing file
+#
+# @full-backing-filename: #optional full path of the backing file
+#
+# @backing-filename-format: #optional the format of the backing file
+#
+# @snapshots: #optional list of VM snapshots
+#
+# Since: 1.3
+#
+##
+
+{ 'type': 'ImageInfo',
+ 'data': {'filename': 'str', 'format': 'str', '*dirty-flag': 'bool',
+ '*actual-size': 'int', 'virtual-size': 'int',
+ '*cluster-size': 'int', '*encrypted': 'bool',
+ '*backing-filename': 'str', '*full-backing-filename': 'str',
+ '*backing-filename-format': 'str', '*snapshots': ['SnapshotInfo'] } }
+
##
# @StatusInfo:
#
##
# @SpiceQueryMouseMode
#
-# An enumation of Spice mouse states.
+# An enumeration of Spice mouse states.
#
# @client: Mouse cursor position is determined by the client.
#
static void tcp_chr_accept(void *opaque);
-static void tcp_chr_connect(void *opaque);
-
static int tcp_chr_write(CharDriverState *chr, const uint8_t *buf, int len)
{
TCPCharDriver *s = chr->opaque;
if (s->connected) {
return send_all(s->fd, buf, len);
- } else if (s->listen_fd == -1) {
- /* (Re-)connect for unconnected writing */
- tcp_chr_connect(chr);
- return 0;
} else {
+ /* XXX: indicate an error ? */
return len;
}
}
TCPCharDriver *s = chr->opaque;
s->connected = 1;
- qemu_set_fd_handler2(s->fd, tcp_chr_read_poll,
- tcp_chr_read, NULL, chr);
+ if (s->fd >= 0) {
+ qemu_set_fd_handler2(s->fd, tcp_chr_read_poll,
+ tcp_chr_read, NULL, chr);
+ }
qemu_chr_generic_open(chr);
}
.name = "dump-guest-core",
.type = QEMU_OPT_BOOL,
.help = "Include guest memory in a core dump",
+ }, {
+ .name = "mem-merge",
+ .type = QEMU_OPT_BOOL,
+ .help = "enable/disable memory merge support",
},
{ /* End of list */ }
},
ETEXI
DEF("info", img_info,
- "info [-f fmt] filename")
+ "info [-f fmt] [--output=ofmt] filename")
STEXI
-@item info [-f @var{fmt}] @var{filename}
+@item info [-f @var{fmt}] [--output=@var{ofmt}] @var{filename}
ETEXI
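For example (an assumed invocation, file name illustrative), the new option
makes qemu-img emit the ImageInfo structure as JSON:

qemu-img info --output=json disk.qcow2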
DEF("snapshot", img_snapshot,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+#include "qapi-visit.h"
+#include "qapi/qmp-output-visitor.h"
+#include "qjson.h"
#include "qemu-common.h"
#include "qemu-option.h"
#include "qemu-error.h"
#include "osdep.h"
#include "sysemu.h"
#include "block_int.h"
+#include <getopt.h>
#include <stdio.h>
#ifdef _WIN32
" '-p' show progress of command (only certain commands)\n"
" '-S' indicates the consecutive number of bytes that must contain only zeros\n"
" for qemu-img to create a sparse image during conversion\n"
+ " '--output' takes the format in which the output must be done (human or json)\n"
"\n"
"Parameters to check subcommand:\n"
" '-r' tries to repair any inconsistencies that are found during the check.\n"
" '-r leaks' repairs only cluster leaks, whereas '-r all' fixes all\n"
" kinds of errors, with a higher risk of choosing the wrong fix or\n"
- " hiding corruption that has already occured.\n"
+ " hiding corruption that has already occurred.\n"
"\n"
"Parameters to snapshot subcommand:\n"
" 'snapshot' is the name of the snapshot to create, apply or delete\n"
static BlockDriverState *bdrv_new_open(const char *filename,
const char *fmt,
- int flags)
+ int flags,
+ bool require_io)
{
BlockDriverState *bs;
BlockDriver *drv;
goto fail;
}
- if (bdrv_is_encrypted(bs)) {
+ if (bdrv_is_encrypted(bs) && require_io) {
printf("Disk image '%s' is encrypted.\n", filename);
if (read_password(password, sizeof(password)) < 0) {
error_report("No password given");
}
filename = argv[optind++];
- bs = bdrv_new_open(filename, fmt, flags);
+ bs = bdrv_new_open(filename, fmt, flags, true);
if (!bs) {
return 1;
}
return -1;
}
- bs = bdrv_new_open(filename, fmt, flags);
+ bs = bdrv_new_open(filename, fmt, flags, true);
if (!bs) {
return 1;
}
total_sectors = 0;
for (bs_i = 0; bs_i < bs_n; bs_i++) {
- bs[bs_i] = bdrv_new_open(argv[optind + bs_i], fmt, BDRV_O_FLAGS);
+ bs[bs_i] = bdrv_new_open(argv[optind + bs_i], fmt, BDRV_O_FLAGS, true);
if (!bs[bs_i]) {
error_report("Could not open '%s'", argv[optind + bs_i]);
ret = -1;
return -1;
}
- out_bs = bdrv_new_open(out_filename, out_fmt, flags);
+ out_bs = bdrv_new_open(out_filename, out_fmt, flags, true);
if (!out_bs) {
ret = -1;
goto out;
g_free(sn_tab);
}
-static int img_info(int argc, char **argv)
+static void collect_snapshots(BlockDriverState *bs, ImageInfo *info)
+{
+ int i, sn_count;
+ QEMUSnapshotInfo *sn_tab = NULL;
+ SnapshotInfoList *info_list, *cur_item = NULL;
+ sn_count = bdrv_snapshot_list(bs, &sn_tab);
+
+ for (i = 0; i < sn_count; i++) {
+ info->has_snapshots = true;
+ info_list = g_new0(SnapshotInfoList, 1);
+
+ info_list->value = g_new0(SnapshotInfo, 1);
+ info_list->value->id = g_strdup(sn_tab[i].id_str);
+ info_list->value->name = g_strdup(sn_tab[i].name);
+ info_list->value->vm_state_size = sn_tab[i].vm_state_size;
+ info_list->value->date_sec = sn_tab[i].date_sec;
+ info_list->value->date_nsec = sn_tab[i].date_nsec;
+ info_list->value->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
+ info_list->value->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;
+
+ /* XXX: waiting for the qapi to support qemu-queue.h types */
+ if (!cur_item) {
+ info->snapshots = cur_item = info_list;
+ } else {
+ cur_item->next = info_list;
+ cur_item = info_list;
+ }
+
+ }
+
+ g_free(sn_tab);
+}
+
+static void dump_json_image_info(ImageInfo *info)
+{
+ Error *errp = NULL;
+ QString *str;
+ QmpOutputVisitor *ov = qmp_output_visitor_new();
+ QObject *obj;
+ visit_type_ImageInfo(qmp_output_get_visitor(ov),
+ &info, NULL, &errp);
+ obj = qmp_output_get_qobject(ov);
+ str = qobject_to_json_pretty(obj);
+ assert(str != NULL);
+ printf("%s\n", qstring_get_str(str));
+ qobject_decref(obj);
+ qmp_output_visitor_cleanup(ov);
+ QDECREF(str);
+}
+
+static void collect_image_info(BlockDriverState *bs,
+ ImageInfo *info,
+ const char *filename,
+ const char *fmt)
{
- int c;
- const char *filename, *fmt;
- BlockDriverState *bs;
- char size_buf[128], dsize_buf[128];
uint64_t total_sectors;
- int64_t allocated_size;
char backing_filename[1024];
char backing_filename2[1024];
BlockDriverInfo bdi;
+ bdrv_get_geometry(bs, &total_sectors);
+
+ info->filename = g_strdup(filename);
+ info->format = g_strdup(bdrv_get_format_name(bs));
+ info->virtual_size = total_sectors * 512;
+ info->actual_size = bdrv_get_allocated_file_size(bs);
+ info->has_actual_size = info->actual_size >= 0;
+ if (bdrv_is_encrypted(bs)) {
+ info->encrypted = true;
+ info->has_encrypted = true;
+ }
+ if (bdrv_get_info(bs, &bdi) >= 0) {
+ if (bdi.cluster_size != 0) {
+ info->cluster_size = bdi.cluster_size;
+ info->has_cluster_size = true;
+ }
+ info->dirty_flag = bdi.is_dirty;
+ info->has_dirty_flag = true;
+ }
+ bdrv_get_backing_filename(bs, backing_filename, sizeof(backing_filename));
+ if (backing_filename[0] != '\0') {
+ info->backing_filename = g_strdup(backing_filename);
+ info->has_backing_filename = true;
+ bdrv_get_full_backing_filename(bs, backing_filename2,
+ sizeof(backing_filename2));
+
+ if (strcmp(backing_filename, backing_filename2) != 0) {
+ info->full_backing_filename =
+ g_strdup(backing_filename2);
+ info->has_full_backing_filename = true;
+ }
+
+ if (bs->backing_format[0]) {
+ info->backing_filename_format = g_strdup(bs->backing_format);
+ info->has_backing_filename_format = true;
+ }
+ }
+}
+
+static void dump_human_image_info(ImageInfo *info)
+{
+ char size_buf[128], dsize_buf[128];
+ if (!info->has_actual_size) {
+ snprintf(dsize_buf, sizeof(dsize_buf), "unavailable");
+ } else {
+ get_human_readable_size(dsize_buf, sizeof(dsize_buf),
+ info->actual_size);
+ }
+ get_human_readable_size(size_buf, sizeof(size_buf), info->virtual_size);
+ printf("image: %s\n"
+ "file format: %s\n"
+ "virtual size: %s (%" PRId64 " bytes)\n"
+ "disk size: %s\n",
+ info->filename, info->format, size_buf,
+ info->virtual_size,
+ dsize_buf);
+
+ if (info->has_encrypted && info->encrypted) {
+ printf("encrypted: yes\n");
+ }
+
+ if (info->has_cluster_size) {
+ printf("cluster_size: %" PRId64 "\n", info->cluster_size);
+ }
+
+ if (info->has_dirty_flag && info->dirty_flag) {
+ printf("cleanly shut down: no\n");
+ }
+
+ if (info->has_backing_filename) {
+ printf("backing file: %s", info->backing_filename);
+ if (info->has_full_backing_filename) {
+ printf(" (actual path: %s)", info->full_backing_filename);
+ }
+ putchar('\n');
+ if (info->has_backing_filename_format) {
+ printf("backing file format: %s\n", info->backing_filename_format);
+ }
+ }
+}
+
+enum {OPTION_OUTPUT = 256};
+
+typedef enum OutputFormat {
+ OFORMAT_JSON,
+ OFORMAT_HUMAN,
+} OutputFormat;
+
+static int img_info(int argc, char **argv)
+{
+ int c;
+ OutputFormat output_format = OFORMAT_HUMAN;
+ const char *filename, *fmt, *output;
+ BlockDriverState *bs;
+ ImageInfo *info;
+
fmt = NULL;
+ output = NULL;
for(;;) {
- c = getopt(argc, argv, "f:h");
+ int option_index = 0;
+ static const struct option long_options[] = {
+ {"help", no_argument, 0, 'h'},
+ {"format", required_argument, 0, 'f'},
+ {"output", required_argument, 0, OPTION_OUTPUT},
+ {0, 0, 0, 0}
+ };
+ c = getopt_long(argc, argv, "f:h",
+ long_options, &option_index);
if (c == -1) {
break;
}
case 'f':
fmt = optarg;
break;
+ case OPTION_OUTPUT:
+ output = optarg;
+ break;
}
}
if (optind >= argc) {
}
filename = argv[optind++];
- bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_NO_BACKING);
- if (!bs) {
+ if (output && !strcmp(output, "json")) {
+ output_format = OFORMAT_JSON;
+ } else if (output && !strcmp(output, "human")) {
+ output_format = OFORMAT_HUMAN;
+ } else if (output) {
+ error_report("--output must be used with human or json as argument.");
return 1;
}
- bdrv_get_geometry(bs, &total_sectors);
- get_human_readable_size(size_buf, sizeof(size_buf), total_sectors * 512);
- allocated_size = bdrv_get_allocated_file_size(bs);
- if (allocated_size < 0) {
- snprintf(dsize_buf, sizeof(dsize_buf), "unavailable");
- } else {
- get_human_readable_size(dsize_buf, sizeof(dsize_buf),
- allocated_size);
- }
- printf("image: %s\n"
- "file format: %s\n"
- "virtual size: %s (%" PRId64 " bytes)\n"
- "disk size: %s\n",
- filename, bdrv_get_format_name(bs), size_buf,
- (total_sectors * 512),
- dsize_buf);
- if (bdrv_is_encrypted(bs)) {
- printf("encrypted: yes\n");
- }
- if (bdrv_get_info(bs, &bdi) >= 0) {
- if (bdi.cluster_size != 0) {
- printf("cluster_size: %d\n", bdi.cluster_size);
- }
- if (bdi.is_dirty) {
- printf("cleanly shut down: no\n");
- }
+
+ bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_NO_BACKING, false);
+ if (!bs) {
+ return 1;
}
- bdrv_get_backing_filename(bs, backing_filename, sizeof(backing_filename));
- if (backing_filename[0] != '\0') {
- bdrv_get_full_backing_filename(bs, backing_filename2,
- sizeof(backing_filename2));
- printf("backing file: %s", backing_filename);
- if (strcmp(backing_filename, backing_filename2) != 0) {
- printf(" (actual path: %s)", backing_filename2);
- }
- putchar('\n');
+
+ info = g_new0(ImageInfo, 1);
+ collect_image_info(bs, info, filename, fmt);
+
+ switch (output_format) {
+ case OFORMAT_HUMAN:
+ dump_human_image_info(info);
+ dump_snapshots(bs);
+ break;
+ case OFORMAT_JSON:
+ collect_snapshots(bs, info);
+ dump_json_image_info(info);
+ break;
}
- dump_snapshots(bs);
+
+ qapi_free_ImageInfo(info);
bdrv_delete(bs);
return 0;
}
filename = argv[optind++];
/* Open the image */
- bs = bdrv_new_open(filename, NULL, bdrv_oflags);
+ bs = bdrv_new_open(filename, NULL, bdrv_oflags, true);
if (!bs) {
return 1;
}
* Ignore the old backing file for unsafe rebase in case we want to correct
* the reference to a renamed or moved backing file.
*/
- bs = bdrv_new_open(filename, fmt, flags);
+ bs = bdrv_new_open(filename, fmt, flags, true);
if (!bs) {
return 1;
}
n = qemu_opt_get_size(param, BLOCK_OPT_SIZE, 0);
qemu_opts_del(param);
- bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_RDWR);
+ bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_RDWR, true);
if (!bs) {
ret = -1;
goto out;
If @code{-r} is specified, qemu-img tries to repair any inconsistencies found
during the check. @code{-r leaks} repairs only cluster leaks, whereas
@code{-r all} fixes all kinds of errors, with a higher risk of choosing the
-wrong fix or hiding corruption that has already occured.
+wrong fix or hiding corruption that has already occurred.
Only the formats @code{qcow2}, @code{qed} and @code{vdi} support
consistency checks.
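+For instance, to repair only cluster leaks in an image (the image name
+@code{test.qcow2} below is only illustrative):
+@example
+qemu-img check -r leaks test.qcow2
+@end example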
@var{backing_file} should have the same content as the input's base image,
however the path, image format, etc may differ.
-@item info [-f @var{fmt}] @var{filename}
+@item info [-f @var{fmt}] [--output=@var{ofmt}] @var{filename}
Give information about the disk image @var{filename}. Use it in
particular to know the size reserved on disk which can be different
from the displayed size. If VM snapshots are stored in the disk image,
-they are displayed too.
+they are displayed too. The command can output in the format @var{ofmt},
+which is either @code{human} or @code{json}.
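+For example, assuming an existing image @code{test.qcow2}, the same
+information can be printed as JSON with:
+@example
+qemu-img info --output=json test.qcow2
+@end example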
@item snapshot [-l | -a @var{snapshot} | -c @var{snapshot} | -d @var{snapshot} ] @var{filename}
" supported accelerators are kvm, xen, tcg (default: tcg)\n"
" kernel_irqchip=on|off controls accelerated irqchip support\n"
" kvm_shadow_mem=size of KVM shadow MMU\n"
- " dump-guest-core=on|off include guest memory in a core dump (default=on)\n",
+ " dump-guest-core=on|off include guest memory in a core dump (default=on)\n"
+ " mem-merge=on|off controls memory merge support (default: on)\n",
QEMU_ARCH_ALL)
STEXI
@item -machine [type=]@var{name}[,prop=@var{value}[,...]]
Defines the size of the KVM shadow MMU.
@item dump-guest-core=on|off
Include guest memory in a core dump. The default is on.
+@item mem-merge=on|off
+Enables or disables memory merge support. This feature, when supported by
+the host, de-duplicates identical memory pages among VM instances
+(enabled by default); see the example below.
@end table
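+As an illustration (the disk image name is arbitrary), page merging can be
+turned off for a guest like this:
+@example
+qemu-system-i386 -machine pc,mem-merge=off disk.img
+@end example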
ETEXI
Not all devices are supported on all targets. Use -net nic,model=?
for a list of available devices for your target.
+@item -netdev user,id=@var{id}[,@var{option}][,@var{option}][,...]
@item -net user[,@var{option}][,@var{option}][,...]
Use the user mode network stack which requires no administrator
privilege to run. Valid options are:
@item vlan=@var{n}
Connect user mode stack to VLAN @var{n} (@var{n} = 0 is the default).
+@item id=@var{id}
@item name=@var{name}
Assign symbolic name for use in monitor commands.
syntax gives undefined results. Their use for new applications is discouraged
as they will be removed from future versions.
+@item -netdev tap,id=@var{id}[,fd=@var{h}][,ifname=@var{name}][,script=@var{file}][,downscript=@var{dfile}][,helper=@var{helper}]
@item -net tap[,vlan=@var{n}][,name=@var{name}][,fd=@var{h}][,ifname=@var{name}][,script=@var{file}][,downscript=@var{dfile}][,helper=@var{helper}]
Connect the host TAP network interface @var{name} to VLAN @var{n}.
-net nic -net tap,"helper=/usr/local/libexec/qemu-bridge-helper"
@end example
+@item -netdev bridge,id=@var{id}[,br=@var{bridge}][,helper=@var{helper}]
@item -net bridge[,vlan=@var{n}][,name=@var{name}][,br=@var{bridge}][,helper=@var{helper}]
Connect a host TAP network interface to a host bridge device.
qemu-system-i386 linux.img -net bridge,br=qemubr0 -net nic,model=virtio
@end example
+@item -netdev socket,id=@var{id}[,fd=@var{h}][,listen=[@var{host}]:@var{port}][,connect=@var{host}:@var{port}]
@item -net socket[,vlan=@var{n}][,name=@var{name}][,fd=@var{h}] [,listen=[@var{host}]:@var{port}][,connect=@var{host}:@var{port}]
Connect the VLAN @var{n} to a remote VLAN in another QEMU virtual
-net socket,connect=127.0.0.1:1234
@end example
+@item -netdev socket,id=@var{id}[,fd=@var{h}][,mcast=@var{maddr}:@var{port}[,localaddr=@var{addr}]]
@item -net socket[,vlan=@var{n}][,name=@var{name}][,fd=@var{h}][,mcast=@var{maddr}:@var{port}[,localaddr=@var{addr}]]
Create a VLAN @var{n} shared with another QEMU virtual
-net socket,mcast=239.192.168.1:1102,localaddr=1.2.3.4
@end example
+@item -netdev vde,id=@var{id}[,sock=@var{socketpath}][,port=@var{n}][,group=@var{groupname}][,mode=@var{octalmode}]
@item -net vde[,vlan=@var{n}][,name=@var{name}][,sock=@var{socketpath}] [,port=@var{n}][,group=@var{groupname}][,mode=@var{octalmode}]
Connect VLAN @var{n} to PORT @var{n} of a vde switch running on host and
listening for incoming connections on @var{socketpath}. Use GROUP @var{groupname}
output="$PWD"
fi
-for arch in x86 powerpc s390; do
+# This will pick up non-directories too (e.g. "Kconfig"), but we will
+# ignore them in the next loop.
+ARCHLIST=$(cd "$linux/arch" && echo *)
+
+for arch in $ARCHLIST; do
+ # Discard anything which isn't a KVM-supporting architecture
+ if ! [ -e "$linux/arch/$arch/include/asm/kvm.h" ]; then
+ continue
+ fi
+
+ # Blacklist architectures which have KVM headers but are actually dead
+ if [ "$arch" = "ia64" ]; then
+ continue
+ fi
+
make -C "$linux" INSTALL_HDR_PATH="$tmpdir" SRCARCH=$arch headers_install
rm -rf "$output/linux-headers/asm-$arch"
#ifndef SOFTMMU_DEFS_H
#define SOFTMMU_DEFS_H
-#ifndef CONFIG_TCG_PASS_AREG0
-uint8_t __ldb_mmu(target_ulong addr, int mmu_idx);
-void __stb_mmu(target_ulong addr, uint8_t val, int mmu_idx);
-uint16_t __ldw_mmu(target_ulong addr, int mmu_idx);
-void __stw_mmu(target_ulong addr, uint16_t val, int mmu_idx);
-uint32_t __ldl_mmu(target_ulong addr, int mmu_idx);
-void __stl_mmu(target_ulong addr, uint32_t val, int mmu_idx);
-uint64_t __ldq_mmu(target_ulong addr, int mmu_idx);
-void __stq_mmu(target_ulong addr, uint64_t val, int mmu_idx);
-
-uint8_t __ldb_cmmu(target_ulong addr, int mmu_idx);
-void __stb_cmmu(target_ulong addr, uint8_t val, int mmu_idx);
-uint16_t __ldw_cmmu(target_ulong addr, int mmu_idx);
-void __stw_cmmu(target_ulong addr, uint16_t val, int mmu_idx);
-uint32_t __ldl_cmmu(target_ulong addr, int mmu_idx);
-void __stl_cmmu(target_ulong addr, uint32_t val, int mmu_idx);
-uint64_t __ldq_cmmu(target_ulong addr, int mmu_idx);
-void __stq_cmmu(target_ulong addr, uint64_t val, int mmu_idx);
-#else
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx);
void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx);
#endif
-
-#endif
#define ADDR_READ addr_read
#endif
-#ifndef CONFIG_TCG_PASS_AREG0
-#define ENV_PARAM
-#define ENV_VAR
-#define CPU_PREFIX
-#define HELPER_PREFIX __
-#else
-#define ENV_PARAM CPUArchState *env,
-#define ENV_VAR env,
-#define CPU_PREFIX cpu_
-#define HELPER_PREFIX helper_
-#endif
-
/* generic load/store macros */
static inline RES_TYPE
-glue(glue(glue(CPU_PREFIX, ld), USUFFIX), MEMSUFFIX)(ENV_PARAM
- target_ulong ptr)
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
int page_index;
RES_TYPE res;
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_VAR
- addr,
- mmu_idx);
+ res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
} else {
uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
#if DATA_SIZE <= 2
static inline int
-glue(glue(glue(CPU_PREFIX, lds), SUFFIX), MEMSUFFIX)(ENV_PARAM
- target_ulong ptr)
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
int res, page_index;
target_ulong addr;
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = (DATA_STYPE)glue(glue(glue(HELPER_PREFIX, ld), SUFFIX),
- MMUSUFFIX)(ENV_VAR addr, mmu_idx);
+ res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
+ MMUSUFFIX)(env, addr, mmu_idx);
} else {
uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
/* generic store macro */
static inline void
-glue(glue(glue(CPU_PREFIX, st), SUFFIX), MEMSUFFIX)(ENV_PARAM target_ulong ptr,
- RES_TYPE v)
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+ RES_TYPE v)
{
int page_index;
target_ulong addr;
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_VAR addr, v,
- mmu_idx);
+ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
} else {
uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
glue(glue(st, SUFFIX), _raw)(hostaddr, v);
#if ACCESS_TYPE != (NB_MMU_MODES + 1)
#if DATA_SIZE == 8
-static inline float64 glue(glue(CPU_PREFIX, ldfq), MEMSUFFIX)(ENV_PARAM
- target_ulong ptr)
+static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr)
{
union {
float64 d;
uint64_t i;
} u;
- u.i = glue(glue(CPU_PREFIX, ldq), MEMSUFFIX)(ENV_VAR ptr);
+ u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
return u.d;
}
-static inline void glue(glue(CPU_PREFIX, stfq), MEMSUFFIX)(ENV_PARAM
- target_ulong ptr,
- float64 v)
+static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr, float64 v)
{
union {
float64 d;
uint64_t i;
} u;
u.d = v;
- glue(glue(CPU_PREFIX, stq), MEMSUFFIX)(ENV_VAR ptr, u.i);
+ glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
}
#endif /* DATA_SIZE == 8 */
#if DATA_SIZE == 4
-static inline float32 glue(glue(CPU_PREFIX, ldfl), MEMSUFFIX)(ENV_PARAM
- target_ulong ptr)
+static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr)
{
union {
float32 f;
uint32_t i;
} u;
- u.i = glue(glue(CPU_PREFIX, ldl), MEMSUFFIX)(ENV_VAR ptr);
+ u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
return u.f;
}
-static inline void glue(glue(CPU_PREFIX, stfl), MEMSUFFIX)(ENV_PARAM
- target_ulong ptr,
- float32 v)
+static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr, float32 v)
{
union {
float32 f;
uint32_t i;
} u;
u.f = v;
- glue(glue(CPU_PREFIX, stl), MEMSUFFIX)(ENV_VAR ptr, u.i);
+ glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
}
#endif /* DATA_SIZE == 4 */
#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ
-#undef ENV_PARAM
-#undef ENV_VAR
-#undef CPU_PREFIX
-#undef HELPER_PREFIX
#define ADDR_READ addr_read
#endif
-#ifndef CONFIG_TCG_PASS_AREG0
-#define ENV_PARAM
-#define ENV_VAR
-#define CPU_PREFIX
-#define HELPER_PREFIX __
-#else
-#define ENV_PARAM CPUArchState *env,
-#define ENV_VAR env,
-#define CPU_PREFIX cpu_
-#define HELPER_PREFIX helper_
-#endif
-
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr,
int mmu_idx,
uintptr_t retaddr);
-static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
+static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
target_phys_addr_t physaddr,
target_ulong addr,
uintptr_t retaddr)
/* handle all cases except unaligned access which span two pages */
DATA_TYPE
-glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
- target_ulong addr,
- int mmu_idx)
+glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+ int mmu_idx)
{
DATA_TYPE res;
int index;
goto do_unaligned_access;
retaddr = GETPC();
ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
- do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
- res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
+ res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
mmu_idx, retaddr);
} else {
/* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
retaddr = GETPC();
- do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
addend = env->tlb_table[mmu_idx][index].addend;
retaddr = GETPC();
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
goto redo;
/* handle all unaligned cases */
static DATA_TYPE
-glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
+glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr,
int mmu_idx,
uintptr_t retaddr)
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
ioaddr = env->iotlb[mmu_idx][index];
- res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* slow unaligned access (it spans two pages) */
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
+ res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
mmu_idx, retaddr);
- res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
+ res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
mmu_idx, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
#ifndef SOFTMMU_CODE_ACCESS
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr,
DATA_TYPE val,
int mmu_idx,
uintptr_t retaddr);
-static inline void glue(io_write, SUFFIX)(ENV_PARAM
+static inline void glue(io_write, SUFFIX)(CPUArchState *env,
target_phys_addr_t physaddr,
DATA_TYPE val,
target_ulong addr,
#endif /* SHIFT > 2 */
}
-void glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
- target_ulong addr,
- DATA_TYPE val,
- int mmu_idx)
+void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, DATA_TYPE val,
+ int mmu_idx)
{
target_phys_addr_t ioaddr;
target_ulong tlb_addr;
goto do_unaligned_access;
retaddr = GETPC();
ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
- do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
- glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
+ glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
mmu_idx, retaddr);
} else {
/* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
retaddr = GETPC();
- do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
}
#endif
addend = env->tlb_table[mmu_idx][index].addend;
retaddr = GETPC();
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0)
- do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
tlb_fill(env, addr, 1, mmu_idx, retaddr);
goto redo;
}
/* handles all unaligned cases */
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr,
DATA_TYPE val,
int mmu_idx,
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
ioaddr = env->iotlb[mmu_idx][index];
- glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
* previous page from the TLB cache. */
for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
- glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
+ glue(slow_stb, MMUSUFFIX)(env, addr + i,
val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
mmu_idx, retaddr);
#else
- glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
+ glue(slow_stb, MMUSUFFIX)(env, addr + i,
val >> (i * 8),
mmu_idx, retaddr);
#endif
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
-#undef ENV_PARAM
-#undef ENV_VAR
-#undef CPU_PREFIX
-#undef HELPER_PREFIX
obj-$(CONFIG_SOFTMMU) += machine.o
obj-y += translate.o op_helper.o helper.o cpu.o
obj-y += neon_helper.o iwmmxt_helper.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
}
/* Load an instruction and return it in the standard little-endian order */
-static inline uint32_t arm_ldl_code(uint32_t addr, bool do_swap)
+static inline uint32_t arm_ldl_code(CPUARMState *env, uint32_t addr,
+ bool do_swap)
{
- uint32_t insn = ldl_code(addr);
+ uint32_t insn = cpu_ldl_code(env, addr);
if (do_swap) {
return bswap32(insn);
}
}
/* Ditto, for a halfword (Thumb) instruction */
-static inline uint16_t arm_lduw_code(uint32_t addr, bool do_swap)
+static inline uint16_t arm_lduw_code(CPUARMState *env, uint32_t addr,
+ bool do_swap)
{
- uint16_t insn = lduw_code(addr);
+ uint16_t insn = cpu_lduw_code(env, addr);
if (do_swap) {
return bswap16(insn);
}
case EXCP_BKPT:
if (semihosting_enabled) {
int nr;
- nr = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
+ nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
if (nr == 0xab) {
env->regs[15] += 2;
env->regs[0] = do_arm_semihosting(env);
if (semihosting_enabled) {
/* Check for semihosting interrupt. */
if (env->thumb) {
- mask = arm_lduw_code(env->regs[15] - 2, env->bswap_code) & 0xff;
+ mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
+ & 0xff;
} else {
- mask = arm_ldl_code(env->regs[15] - 4, env->bswap_code)
+ mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
& 0xffffff;
}
/* Only intercept calls from privileged modes, to provide some
case EXCP_BKPT:
/* See if this is a semihosting syscall. */
if (env->thumb && semihosting_enabled) {
- mask = arm_lduw_code(env->regs[15], env->bswap_code) & 0xff;
+ mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
if (mask == 0xab
&& (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
env->regs[15] += 2;
DEF_HELPER_1(sxtb16, i32, i32)
DEF_HELPER_1(uxtb16, i32, i32)
-DEF_HELPER_2(add_setq, i32, i32, i32)
-DEF_HELPER_2(add_saturate, i32, i32, i32)
-DEF_HELPER_2(sub_saturate, i32, i32, i32)
-DEF_HELPER_2(add_usaturate, i32, i32, i32)
-DEF_HELPER_2(sub_usaturate, i32, i32, i32)
-DEF_HELPER_1(double_saturate, i32, s32)
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_2(double_saturate, i32, env, s32)
DEF_HELPER_2(sdiv, s32, s32, s32)
DEF_HELPER_2(udiv, i32, i32, i32)
DEF_HELPER_1(rbit, i32, i32)
PAS_OP(uh)
#undef PAS_OP
-DEF_HELPER_2(ssat, i32, i32, i32)
-DEF_HELPER_2(usat, i32, i32, i32)
-DEF_HELPER_2(ssat16, i32, i32, i32)
-DEF_HELPER_2(usat16, i32, i32, i32)
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
DEF_HELPER_2(usad8, i32, i32, i32)
DEF_HELPER_1(logicq_cc, i32, i64)
DEF_HELPER_3(sel_flags, i32, i32, i32, i32)
-DEF_HELPER_1(exception, void, i32)
-DEF_HELPER_0(wfi, void)
+DEF_HELPER_2(exception, void, env, i32)
+DEF_HELPER_1(wfi, void, env)
-DEF_HELPER_2(cpsr_write, void, i32, i32)
-DEF_HELPER_0(cpsr_read, i32)
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
DEF_HELPER_3(v7m_msr, void, env, i32, i32)
DEF_HELPER_2(v7m_mrs, i32, env, i32)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
-DEF_HELPER_1(get_user_reg, i32, i32)
-DEF_HELPER_2(set_user_reg, void, i32, i32)
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
DEF_HELPER_1(vfp_get_fpscr, i32, env)
DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
DEF_HELPER_2(rsqrte_f32, f32, f32, env)
DEF_HELPER_2(recpe_u32, i32, i32, env)
DEF_HELPER_2(rsqrte_u32, i32, i32, env)
-DEF_HELPER_4(neon_tbl, i32, i32, i32, i32, i32)
-
-DEF_HELPER_2(add_cc, i32, i32, i32)
-DEF_HELPER_2(adc_cc, i32, i32, i32)
-DEF_HELPER_2(sub_cc, i32, i32, i32)
-DEF_HELPER_2(sbc_cc, i32, i32, i32)
-
-DEF_HELPER_2(shl, i32, i32, i32)
-DEF_HELPER_2(shr, i32, i32, i32)
-DEF_HELPER_2(sar, i32, i32, i32)
-DEF_HELPER_2(shl_cc, i32, i32, i32)
-DEF_HELPER_2(shr_cc, i32, i32, i32)
-DEF_HELPER_2(sar_cc, i32, i32, i32)
-DEF_HELPER_2(ror_cc, i32, i32, i32)
+DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32)
+
+DEF_HELPER_3(add_cc, i32, env, i32, i32)
+DEF_HELPER_3(adc_cc, i32, env, i32, i32)
+DEF_HELPER_3(sub_cc, i32, env, i32, i32)
+DEF_HELPER_3(sbc_cc, i32, env, i32, i32)
+
+DEF_HELPER_3(shl, i32, env, i32, i32)
+DEF_HELPER_3(shr, i32, env, i32, i32)
+DEF_HELPER_3(sar, i32, env, i32, i32)
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
/* neon_helper.c */
DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helper.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
-static void raise_exception(int tt)
+static void raise_exception(CPUARMState *env, int tt)
{
env->exception_index = tt;
cpu_loop_exit(env);
}
-uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
+uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
uint32_t rn, uint32_t maxindex)
{
uint32_t val;
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUARMState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUARMState *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
TranslationBlock *tb;
- CPUARMState *saved_env;
int ret;
- saved_env = env;
- env = env1;
ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
cpu_restore_state(tb, env, retaddr);
}
}
- raise_exception(env->exception_index);
+ raise_exception(env, env->exception_index);
}
- env = saved_env;
}
#endif
/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
instructions into helper.c */
-uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
return res;
}
-uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
return res;
}
-uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
return res;
}
-uint32_t HELPER(double_saturate)(int32_t val)
+uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
uint32_t res;
if (val >= 0x40000000) {
return res;
}
-uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (res < a) {
return res;
}
-uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (res > a) {
}
/* Signed saturation. */
-static inline uint32_t do_ssat(int32_t val, int shift)
+static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
int32_t top;
uint32_t mask;
}
/* Unsigned saturation. */
-static inline uint32_t do_usat(int32_t val, int shift)
+static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
uint32_t max;
}
/* Signed saturate. */
-uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
+uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
- return do_ssat(x, shift);
+ return do_ssat(env, x, shift);
}
/* Dual halfword signed saturate. */
-uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
+uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
uint32_t res;
- res = (uint16_t)do_ssat((int16_t)x, shift);
- res |= do_ssat(((int32_t)x) >> 16, shift) << 16;
+ res = (uint16_t)do_ssat(env, (int16_t)x, shift);
+ res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
return res;
}
/* Unsigned saturate. */
-uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
+uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
- return do_usat(x, shift);
+ return do_usat(env, x, shift);
}
/* Dual halfword unsigned saturate. */
-uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
+uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
uint32_t res;
- res = (uint16_t)do_usat((int16_t)x, shift);
- res |= do_usat(((int32_t)x) >> 16, shift) << 16;
+ res = (uint16_t)do_usat(env, (int16_t)x, shift);
+ res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
return res;
}
-void HELPER(wfi)(void)
+void HELPER(wfi)(CPUARMState *env)
{
env->exception_index = EXCP_HLT;
env->halted = 1;
cpu_loop_exit(env);
}
-void HELPER(exception)(uint32_t excp)
+void HELPER(exception)(CPUARMState *env, uint32_t excp)
{
env->exception_index = excp;
cpu_loop_exit(env);
}
-uint32_t HELPER(cpsr_read)(void)
+uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
return cpsr_read(env) & ~CPSR_EXEC;
}
-void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
+void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
cpsr_write(env, val, mask);
}
/* Access to user mode registers from privileged modes. */
-uint32_t HELPER(get_user_reg)(uint32_t regno)
+uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
uint32_t val;
return val;
}
-void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
+void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
if (regno == 13) {
env->banked_r13[0] = val;
const ARMCPRegInfo *ri = rip;
int excp = ri->writefn(env, ri, value);
if (excp) {
- raise_exception(excp);
+ raise_exception(env, excp);
}
}
uint64_t value;
int excp = ri->readfn(env, ri, &value);
if (excp) {
- raise_exception(excp);
+ raise_exception(env, excp);
}
return value;
}
const ARMCPRegInfo *ri = rip;
int excp = ri->writefn(env, ri, value);
if (excp) {
- raise_exception(excp);
+ raise_exception(env, excp);
}
}
uint64_t value;
int excp = ri->readfn(env, ri, &value);
if (excp) {
- raise_exception(excp);
+ raise_exception(env, excp);
}
return value;
}
The only way to do that in TCG is a conditional branch, which clobbers
all our temporaries. For now implement these as helper functions. */
-uint32_t HELPER (add_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER (add_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
result = a + b;
return result;
}
-uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(adc_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
if (!env->CF) {
return result;
}
-uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
result = a - b;
return result;
}
-uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(sbc_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
if (!env->CF) {
/* Similarly for variable shift instructions. */
-uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+uint32_t HELPER(shl)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32)
return x << shift;
}
-uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+uint32_t HELPER(shr)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32)
return (uint32_t)x >> shift;
}
-uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+uint32_t HELPER(sar)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32)
return (int32_t)x >> shift;
}
-uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift1, shift;
shift1 = i & 0xff;
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
TCGv tmp_mask = tcg_const_i32(mask);
- gen_helper_cpsr_write(var, tmp_mask);
+ gen_helper_cpsr_write(cpu_env, var, tmp_mask);
tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
{
TCGv tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, excp);
- gen_helper_exception(tmp);
+ gen_helper_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
}
{
if (flags) {
switch (shiftop) {
- case 0: gen_helper_shl_cc(var, var, shift); break;
- case 1: gen_helper_shr_cc(var, var, shift); break;
- case 2: gen_helper_sar_cc(var, var, shift); break;
- case 3: gen_helper_ror_cc(var, var, shift); break;
+ case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
+ case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
}
} else {
switch (shiftop) {
- case 0: gen_helper_shl(var, var, shift); break;
- case 1: gen_helper_shr(var, var, shift); break;
- case 2: gen_helper_sar(var, var, shift); break;
+ case 0: gen_helper_shl(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar(var, cpu_env, var, shift); break;
case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
tcg_gen_rotr_i32(var, var, shift); break;
}
tmp2 = neon_load_reg(rm, 0);
tmp4 = tcg_const_i32(rn);
tmp5 = tcg_const_i32(n);
- gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
+ gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
tcg_temp_free_i32(tmp);
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 1);
tcg_gen_movi_i32(tmp, 0);
}
tmp3 = neon_load_reg(rm, 1);
- gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
+ gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
tcg_temp_free_i32(tmp5);
tcg_temp_free_i32(tmp4);
neon_store_reg(rd, 0, tmp2);
TCGv addr;
TCGv_i64 tmp64;
- insn = arm_ldl_code(s->pc, s->bswap_code);
+ insn = arm_ldl_code(env, s->pc, s->bswap_code);
s->pc += 4;
/* M variants do not implement ARM mode. */
tmp = load_cpu_field(spsr);
} else {
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
}
store_reg(s, rd, tmp);
}
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_helper_double_saturate(tmp2, tmp2);
+ gen_helper_double_saturate(tmp2, cpu_env, tmp2);
if (op1 & 1)
- gen_helper_sub_saturate(tmp, tmp, tmp2);
+ gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_add_saturate(tmp, tmp, tmp2);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
if (IS_USER(s)) {
goto illegal_op;
}
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
gen_exception_return(s, tmp);
} else {
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
} else {
tcg_gen_sub_i32(tmp, tmp, tmp2);
}
break;
case 0x03:
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp2, tmp);
+ gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
} else {
tcg_gen_sub_i32(tmp, tmp2, tmp);
}
break;
case 0x04:
if (set_cc) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
break;
case 0x05:
if (set_cc) {
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_add_carry(tmp, tmp, tmp2);
}
break;
case 0x06:
if (set_cc) {
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_sub_carry(tmp, tmp, tmp2);
}
break;
case 0x07:
if (set_cc) {
- gen_helper_sbc_cc(tmp, tmp2, tmp);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
} else {
gen_sub_carry(tmp, tmp2, tmp);
}
break;
case 0x0a:
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp);
break;
case 0x0b:
if (set_cc) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp);
break;
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
- gen_helper_usat(tmp, tmp, tmp2);
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat(tmp, tmp, tmp2);
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00300fe0) == 0x00200f20) {
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
- gen_helper_usat16(tmp, tmp, tmp2);
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat16(tmp, tmp, tmp2);
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00700fe0) == 0x00000fa0) {
* however it may overflow considered as a signed
* operation, in which case we must set the Q flag.
*/
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
if (rd != 15)
{
tmp2 = load_reg(s, rd);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rn, tmp);
tmp = gen_ld32(addr, IS_USER(s));
if (user) {
tmp2 = tcg_const_i32(i);
- gen_helper_set_user_reg(tmp2, tmp);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp);
} else if (i == rn) {
} else if (user) {
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
- gen_helper_get_user_reg(tmp, tmp2);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, i);
break;
case 8: /* add */
if (conds)
- gen_helper_add_cc(t0, t0, t1);
+ gen_helper_add_cc(t0, cpu_env, t0, t1);
else
tcg_gen_add_i32(t0, t0, t1);
break;
case 10: /* adc */
if (conds)
- gen_helper_adc_cc(t0, t0, t1);
+ gen_helper_adc_cc(t0, cpu_env, t0, t1);
else
gen_adc(t0, t1);
break;
case 11: /* sbc */
if (conds)
- gen_helper_sbc_cc(t0, t0, t1);
+ gen_helper_sbc_cc(t0, cpu_env, t0, t1);
else
gen_sub_carry(t0, t0, t1);
break;
case 13: /* sub */
if (conds)
- gen_helper_sub_cc(t0, t0, t1);
+ gen_helper_sub_cc(t0, cpu_env, t0, t1);
else
tcg_gen_sub_i32(t0, t0, t1);
break;
case 14: /* rsb */
if (conds)
- gen_helper_sub_cc(t0, t1, t0);
+ gen_helper_sub_cc(t0, cpu_env, t1, t0);
else
tcg_gen_sub_i32(t0, t1, t0);
break;
/* Fall through to 32-bit decode. */
}
- insn = arm_lduw_code(s->pc, s->bswap_code);
+ insn = arm_lduw_code(env, s->pc, s->bswap_code);
s->pc += 2;
insn |= (uint32_t)insn_hw1 << 16;
gen_st32(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
gen_st32(tmp, addr, 0);
if (insn & (1 << 21)) {
if ((insn & (1 << 24)) == 0) {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
- gen_helper_double_saturate(tmp, tmp);
+ gen_helper_double_saturate(tmp, cpu_env, tmp);
if (op & 2)
- gen_helper_sub_saturate(tmp, tmp2, tmp);
+ gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
else
- gen_helper_add_saturate(tmp, tmp, tmp2);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, rn);
tcg_temp_free_i32(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
* however it may overflow considered as a signed
* operation, in which case we must set the Q flag.
*/
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
if (rs != 15)
{
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
gen_helper_v7m_mrs(tmp, cpu_env, addr);
tcg_temp_free_i32(addr);
} else {
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
}
store_reg(s, rd, tmp);
break;
if (op & 4) {
/* Unsigned. */
if ((op & 1) && shift == 0)
- gen_helper_usat16(tmp, tmp, tmp2);
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_usat(tmp, tmp, tmp2);
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
} else {
/* Signed. */
if ((op & 1) && shift == 0)
- gen_helper_ssat16(tmp, tmp, tmp2);
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat(tmp, tmp, tmp2);
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
break;
}
}
- insn = arm_lduw_code(s->pc, s->bswap_code);
+ insn = arm_lduw_code(env, s->pc, s->bswap_code);
s->pc += 2;
switch (insn >> 12) {
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
} else {
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
tcg_gen_movi_i32(tmp2, insn & 0xff);
switch (op) {
case 1: /* cmp */
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(tmp2);
break;
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 1: /* cmp */
tmp = load_reg(s, rd);
tmp2 = load_reg(s, rm);
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp);
break;
break;
case 0x2: /* lsl */
if (s->condexec_mask) {
- gen_helper_shl(tmp2, tmp2, tmp);
+ gen_helper_shl(tmp2, cpu_env, tmp2, tmp);
} else {
- gen_helper_shl_cc(tmp2, tmp2, tmp);
+ gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
case 0x3: /* lsr */
if (s->condexec_mask) {
- gen_helper_shr(tmp2, tmp2, tmp);
+ gen_helper_shr(tmp2, cpu_env, tmp2, tmp);
} else {
- gen_helper_shr_cc(tmp2, tmp2, tmp);
+ gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
case 0x4: /* asr */
if (s->condexec_mask) {
- gen_helper_sar(tmp2, tmp2, tmp);
+ gen_helper_sar(tmp2, cpu_env, tmp2, tmp);
} else {
- gen_helper_sar_cc(tmp2, tmp2, tmp);
+ gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
if (s->condexec_mask)
gen_adc(tmp, tmp2);
else
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0x6: /* sbc */
if (s->condexec_mask)
gen_sub_carry(tmp, tmp, tmp2);
else
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0x7: /* ror */
if (s->condexec_mask) {
tcg_gen_andi_i32(tmp, tmp, 0x1f);
tcg_gen_rotr_i32(tmp2, tmp2, tmp);
} else {
- gen_helper_ror_cc(tmp2, tmp2, tmp);
+ gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
if (s->condexec_mask)
tcg_gen_neg_i32(tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0xa: /* cmp */
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
rd = 16;
break;
case 0xb: /* cmn */
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
rd = 16;
break;
case 0xc: /* orr */
/* nothing more to generate */
break;
case DISAS_WFI:
- gen_helper_wfi();
+ gen_helper_wfi(cpu_env);
break;
case DISAS_SWI:
gen_exception(EXCP_SWI);
obj-y += translate.o op_helper.o helper.o cpu.o
obj-$(CONFIG_SOFTMMU) += mmu.o machine.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
}
/* Now that we are in kernel mode, load the handlers address. */
- env->pc = ldl_code(env->pregs[PR_EBP] + ex_vec * 4);
+ env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
env->locked_irq = 1;
env->pregs[PR_CCS] |= F_FLAG_V10; /* set F. */
/* Now that we are in kernel mode, load the handlers address.
This load may not fault, real hw leaves that behaviour as
undefined. */
- env->pc = ldl_code(env->pregs[PR_EBP] + ex_vec * 4);
+ env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
/* Clear the exception_index to avoid spurious hw_aborts for recursive
bus faults. */
#include "def-helper.h"
-DEF_HELPER_1(raise_exception, void, i32)
-DEF_HELPER_1(tlb_flush_pid, void, i32)
-DEF_HELPER_1(spc_write, void, i32)
+DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_2(tlb_flush_pid, void, env, i32)
+DEF_HELPER_2(spc_write, void, env, i32)
DEF_HELPER_3(dump, void, i32, i32, i32)
-DEF_HELPER_0(rfe, void);
-DEF_HELPER_0(rfn, void);
+DEF_HELPER_1(rfe, void, env);
+DEF_HELPER_1(rfn, void, env);
-DEF_HELPER_2(movl_sreg_reg, void, i32, i32)
-DEF_HELPER_2(movl_reg_sreg, void, i32, i32)
+DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
+DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
DEF_HELPER_FLAGS_1(lz, TCG_CALL_PURE, i32, i32);
-DEF_HELPER_FLAGS_3(btst, TCG_CALL_PURE, i32, i32, i32, i32);
+DEF_HELPER_FLAGS_4(btst, TCG_CALL_PURE, i32, env, i32, i32, i32);
-DEF_HELPER_FLAGS_3(evaluate_flags_muls, TCG_CALL_PURE, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_3(evaluate_flags_mulu, TCG_CALL_PURE, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_4(evaluate_flags_mcp, TCG_CALL_PURE, i32, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_4(evaluate_flags_alu_4, TCG_CALL_PURE, i32, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_4(evaluate_flags_sub_4, TCG_CALL_PURE, i32, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_2(evaluate_flags_move_4, TCG_CALL_PURE, i32, i32, i32)
-DEF_HELPER_FLAGS_2(evaluate_flags_move_2, TCG_CALL_PURE, i32, i32, i32)
-DEF_HELPER_0(evaluate_flags, void)
-DEF_HELPER_0(top_evaluate_flags, void)
+DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_PURE, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_PURE, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_mcp, TCG_CALL_PURE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_alu_4, TCG_CALL_PURE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_sub_4, TCG_CALL_PURE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(evaluate_flags_move_4, TCG_CALL_PURE, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(evaluate_flags_move_2, TCG_CALL_PURE, i32, env, i32, i32)
+DEF_HELPER_1(evaluate_flags, void, env)
+DEF_HELPER_1(top_evaluate_flags, void, env)
#include "def-helper.h"
*/
#include "cpu.h"
-#include "dyngen-exec.h"
#include "mmu.h"
#include "helper.h"
#include "host-utils.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUCRISState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
TranslationBlock *tb;
- CPUCRISState *saved_env;
int ret;
- saved_env = env;
- env = env1;
-
D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
env->pc, env->debug1, (void *)retaddr);
ret = cpu_cris_handle_mmu_fault(env, addr, is_write, mmu_idx);
cpu_restore_state(tb, env, retaddr);
/* Evaluate flags after retranslation. */
- helper_top_evaluate_flags();
+ helper_top_evaluate_flags(env);
}
}
cpu_loop_exit(env);
}
- env = saved_env;
}
#endif
-void helper_raise_exception(uint32_t index)
+void helper_raise_exception(CPUCRISState *env, uint32_t index)
{
env->exception_index = index;
cpu_loop_exit(env);
}
-void helper_tlb_flush_pid(uint32_t pid)
+void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
{
#if !defined(CONFIG_USER_ONLY)
pid &= 0xff;
#endif
}
-void helper_spc_write(uint32_t new_spc)
+void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
{
#if !defined(CONFIG_USER_ONLY)
tlb_flush_page(env, env->pregs[PR_SPC]);
#define EXTRACT_FIELD(src, start, end) \
(((src) >> start) & ((1 << (end - start + 1)) - 1))
-void helper_movl_sreg_reg (uint32_t sreg, uint32_t reg)
+void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
{
uint32_t srs;
srs = env->pregs[PR_SRS];
#endif
}
-void helper_movl_reg_sreg (uint32_t reg, uint32_t sreg)
+void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg)
{
uint32_t srs;
env->pregs[PR_SRS] &= 3;
env->pregs[PR_CCS] = ccs;
}
-void helper_rfe(void)
+void helper_rfe(CPUCRISState *env)
{
int rflag = env->pregs[PR_CCS] & R_FLAG;
env->pregs[PR_CCS] |= P_FLAG;
}
-void helper_rfn(void)
+void helper_rfn(CPUCRISState *env)
{
int rflag = env->pregs[PR_CCS] & R_FLAG;
return clz32(t0);
}
-uint32_t helper_btst(uint32_t t0, uint32_t t1, uint32_t ccs)
+uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs)
{
/* FIXME: clean this up. */
return ccs;
}
-static inline uint32_t evaluate_flags_writeback(uint32_t flags, uint32_t ccs)
+static inline uint32_t evaluate_flags_writeback(CPUCRISState *env,
+ uint32_t flags, uint32_t ccs)
{
unsigned int x, z, mask;
return ccs;
}
-uint32_t helper_evaluate_flags_muls(uint32_t ccs, uint32_t res, uint32_t mof)
+uint32_t helper_evaluate_flags_muls(CPUCRISState *env,
+ uint32_t ccs, uint32_t res, uint32_t mof)
{
uint32_t flags = 0;
int64_t tmp;
if ((dneg && mof != -1)
|| (!dneg && mof != 0))
flags |= V_FLAG;
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
-uint32_t helper_evaluate_flags_mulu(uint32_t ccs, uint32_t res, uint32_t mof)
+uint32_t helper_evaluate_flags_mulu(CPUCRISState *env,
+ uint32_t ccs, uint32_t res, uint32_t mof)
{
uint32_t flags = 0;
uint64_t tmp;
if (mof)
flags |= V_FLAG;
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
-uint32_t helper_evaluate_flags_mcp(uint32_t ccs,
+uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs,
uint32_t src, uint32_t dst, uint32_t res)
{
uint32_t flags = 0;
flags |= R_FLAG;
}
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
-uint32_t helper_evaluate_flags_alu_4(uint32_t ccs,
+uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs,
uint32_t src, uint32_t dst, uint32_t res)
{
uint32_t flags = 0;
flags |= C_FLAG;
}
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
-uint32_t helper_evaluate_flags_sub_4(uint32_t ccs,
+uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs,
uint32_t src, uint32_t dst, uint32_t res)
{
uint32_t flags = 0;
}
flags ^= C_FLAG;
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
-uint32_t helper_evaluate_flags_move_4(uint32_t ccs, uint32_t res)
+uint32_t helper_evaluate_flags_move_4(CPUCRISState *env,
+ uint32_t ccs, uint32_t res)
{
uint32_t flags = 0;
else if (res == 0L)
flags |= Z_FLAG;
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
-uint32_t helper_evaluate_flags_move_2(uint32_t ccs, uint32_t res)
+uint32_t helper_evaluate_flags_move_2(CPUCRISState *env,
+ uint32_t ccs, uint32_t res)
{
uint32_t flags = 0;
else if (res == 0)
flags |= Z_FLAG;
- return evaluate_flags_writeback(flags, ccs);
+ return evaluate_flags_writeback(env, flags, ccs);
}
/* TODO: This is expensive. We could split things up and only evaluate part of
CCR on a need to know basis. For now, we simply re-evaluate everything. */
-void helper_evaluate_flags(void)
+void helper_evaluate_flags(CPUCRISState *env)
{
uint32_t src, dst, res;
uint32_t flags = 0;
if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP)
flags ^= C_FLAG;
- env->pregs[PR_CCS] = evaluate_flags_writeback(flags, env->pregs[PR_CCS]);
+ env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags,
+ env->pregs[PR_CCS]);
}
-void helper_top_evaluate_flags(void)
+void helper_top_evaluate_flags(CPUCRISState *env)
{
switch (env->cc_op)
{
case CC_OP_MCP:
- env->pregs[PR_CCS] = helper_evaluate_flags_mcp(
+ env->pregs[PR_CCS] = helper_evaluate_flags_mcp(env,
env->pregs[PR_CCS], env->cc_src,
env->cc_dest, env->cc_result);
break;
case CC_OP_MULS:
- env->pregs[PR_CCS] = helper_evaluate_flags_muls(
+ env->pregs[PR_CCS] = helper_evaluate_flags_muls(env,
env->pregs[PR_CCS], env->cc_result,
env->pregs[PR_MOF]);
break;
case CC_OP_MULU:
- env->pregs[PR_CCS] = helper_evaluate_flags_mulu(
+ env->pregs[PR_CCS] = helper_evaluate_flags_mulu(env,
env->pregs[PR_CCS], env->cc_result,
env->pregs[PR_MOF]);
break;
{
case 4:
env->pregs[PR_CCS] =
- helper_evaluate_flags_move_4(
+ helper_evaluate_flags_move_4(env,
env->pregs[PR_CCS],
env->cc_result);
break;
case 2:
env->pregs[PR_CCS] =
- helper_evaluate_flags_move_2(
+ helper_evaluate_flags_move_2(env,
env->pregs[PR_CCS],
env->cc_result);
break;
default:
- helper_evaluate_flags();
+ helper_evaluate_flags(env);
break;
}
break;
case CC_OP_CMP:
if (env->cc_size == 4)
env->pregs[PR_CCS] =
- helper_evaluate_flags_sub_4(
+ helper_evaluate_flags_sub_4(env,
env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
else
- helper_evaluate_flags();
+ helper_evaluate_flags(env);
break;
default:
{
{
case 4:
env->pregs[PR_CCS] =
- helper_evaluate_flags_alu_4(
+ helper_evaluate_flags_alu_4(env,
env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
break;
default:
- helper_evaluate_flags();
+ helper_evaluate_flags(env);
break;
}
}
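The same conversion pattern recurs in the translate.c hunks that follow: each helper gains an explicit env parameter, and the generated call passes cpu_env right after the TCG output operand (or first, for void helpers). A minimal sketch using a made-up helper name, not code from this patch:

/* op_helper.c sketch: CPU state arrives as an explicit parameter instead of
 * the old AREG0 global. "do_thing" is hypothetical, for illustration only. */
uint32_t helper_do_thing(CPUCRISState *env, uint32_t arg)
{
    return arg ^ env->pregs[PR_CCS];
}

/* translate.c sketch: inside a decoder, the generated call places the
 * destination first, then cpu_env, then the remaining inputs. */
gen_helper_do_thing(cpu_R[dc->op2], cpu_env, cpu_R[dc->op1]);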
target_ulong pc, ppc;
/* Decoder. */
- unsigned int (*decoder)(struct DisasContext *dc);
+ unsigned int (*decoder)(CPUCRISState *env, struct DisasContext *dc);
uint32_t ir;
uint32_t opcode;
unsigned int op1;
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
else {
if (r == PR_PID)
- gen_helper_tlb_flush_pid(tn);
+ gen_helper_tlb_flush_pid(cpu_env, tn);
if (dc->tb_flags & S_FLAG && r == PR_SPC)
- gen_helper_spc_write(tn);
+ gen_helper_spc_write(cpu_env, tn);
else if (r == PR_CCS)
dc->cpustate_changed = 1;
tcg_gen_mov_tl(cpu_PR[r], tn);
return sval;
}
-static int cris_fetch(DisasContext *dc, uint32_t addr,
+static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
unsigned int size, unsigned int sign)
{
int r;
switch (size) {
case 4:
{
- r = ldl_code(addr);
+ r = cpu_ldl_code(env, addr);
break;
}
case 2:
{
if (sign) {
- r = ldsw_code(addr);
+ r = cpu_ldsw_code(env, addr);
} else {
- r = lduw_code(addr);
+ r = cpu_lduw_code(env, addr);
}
break;
}
case 1:
{
if (sign) {
- r = ldsb_code(addr);
+ r = cpu_ldsb_code(env, addr);
} else {
- r = ldub_code(addr);
+ r = cpu_ldub_code(env, addr);
}
break;
}
static inline void t_gen_raise_exception(uint32_t index)
{
TCGv_i32 tmp = tcg_const_i32(index);
- gen_helper_raise_exception(tmp);
+ gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
}
switch (dc->cc_op)
{
case CC_OP_MCP:
- gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
+ gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], cpu_env,
cpu_PR[PR_CCS], cc_src,
cc_dest, cc_result);
break;
case CC_OP_MULS:
- gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
+ gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], cpu_env,
cpu_PR[PR_CCS], cc_result,
cpu_PR[PR_MOF]);
break;
case CC_OP_MULU:
- gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
+ gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], cpu_env,
cpu_PR[PR_CCS], cc_result,
cpu_PR[PR_MOF]);
break;
switch (dc->cc_size)
{
case 4:
- gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
- cpu_PR[PR_CCS], cc_result);
+ gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
+ cpu_env, cpu_PR[PR_CCS], cc_result);
break;
case 2:
- gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
- cpu_PR[PR_CCS], cc_result);
+ gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
+ cpu_env, cpu_PR[PR_CCS], cc_result);
break;
default:
- gen_helper_evaluate_flags();
+ gen_helper_evaluate_flags(cpu_env);
break;
}
break;
case CC_OP_SUB:
case CC_OP_CMP:
if (dc->cc_size == 4)
- gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
+ gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], cpu_env,
cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
else
- gen_helper_evaluate_flags();
+ gen_helper_evaluate_flags(cpu_env);
break;
default:
switch (dc->cc_size)
{
case 4:
- gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
+ gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], cpu_env,
cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
break;
default:
- gen_helper_evaluate_flags();
+ gen_helper_evaluate_flags(cpu_env);
break;
}
break;
t_gen_zext(dst, cpu_R[rd], size);
}
-static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
- TCGv dst)
+static int dec_prep_move_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst)
{
unsigned int rs;
uint32_t imm;
if (memsize == 1)
insn_len++;
- imm = cris_fetch(dc, dc->pc + 2, memsize, s_ext);
+ imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext);
tcg_gen_movi_tl(dst, imm);
dc->postinc = 0;
} else {
/* Prepare T0 and T1 for a memory + alu operation.
s_ext decides if the operand1 should be sign-extended or zero-extended when
needed. */
-static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
- TCGv dst, TCGv src)
+static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst, TCGv src)
{
int insn_len;
- insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
+ insn_len = dec_prep_move_m(env, dc, s_ext, memsize, src);
tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
return insn_len;
}
/* Start of insn decoders. */
-static int dec_bccq(DisasContext *dc)
+static int dec_bccq(CPUCRISState *env, DisasContext *dc)
{
int32_t offset;
int sign;
cris_prepare_cc_branch (dc, offset, cond);
return 2;
}
-static int dec_addoq(DisasContext *dc)
+static int dec_addoq(CPUCRISState *env, DisasContext *dc)
{
int32_t imm;
return 2;
}
-static int dec_addq(DisasContext *dc)
+static int dec_addq(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
return 2;
}
-static int dec_moveq(DisasContext *dc)
+static int dec_moveq(CPUCRISState *env, DisasContext *dc)
{
uint32_t imm;
tcg_gen_movi_tl(cpu_R[dc->op2], imm);
return 2;
}
-static int dec_subq(DisasContext *dc)
+static int dec_subq(CPUCRISState *env, DisasContext *dc)
{
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
return 2;
}
-static int dec_cmpq(DisasContext *dc)
+static int dec_cmpq(CPUCRISState *env, DisasContext *dc)
{
uint32_t imm;
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
return 2;
}
-static int dec_andq(DisasContext *dc)
+static int dec_andq(CPUCRISState *env, DisasContext *dc)
{
uint32_t imm;
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
return 2;
}
-static int dec_orq(DisasContext *dc)
+static int dec_orq(CPUCRISState *env, DisasContext *dc)
{
uint32_t imm;
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
return 2;
}
-static int dec_btstq(DisasContext *dc)
+static int dec_btstq(CPUCRISState *env, DisasContext *dc)
{
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
cris_evaluate_flags(dc);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
dc->flags_uptodate = 1;
return 2;
}
-static int dec_asrq(DisasContext *dc)
+static int dec_asrq(CPUCRISState *env, DisasContext *dc)
{
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
}
-static int dec_lslq(DisasContext *dc)
+static int dec_lslq(CPUCRISState *env, DisasContext *dc)
{
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
}
-static int dec_lsrq(DisasContext *dc)
+static int dec_lsrq(CPUCRISState *env, DisasContext *dc)
{
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
return 2;
}
-static int dec_move_r(DisasContext *dc)
+static int dec_move_r(CPUCRISState *env, DisasContext *dc)
{
int size = memsize_zz(dc);
return 2;
}
-static int dec_scc_r(DisasContext *dc)
+static int dec_scc_r(CPUCRISState *env, DisasContext *dc)
{
int cond = dc->op2;
}
}
-static int dec_and_r(DisasContext *dc)
+static int dec_and_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_lz_r(DisasContext *dc)
+static int dec_lz_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
LOG_DIS("lz $r%u, $r%u\n",
return 2;
}
-static int dec_lsl_r(DisasContext *dc)
+static int dec_lsl_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_lsr_r(DisasContext *dc)
+static int dec_lsr_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_asr_r(DisasContext *dc)
+static int dec_asr_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_muls_r(DisasContext *dc)
+static int dec_muls_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_mulu_r(DisasContext *dc)
+static int dec_mulu_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
}
-static int dec_dstep_r(DisasContext *dc)
+static int dec_dstep_r(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
return 2;
}
-static int dec_xor_r(DisasContext *dc)
+static int dec_xor_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_bound_r(DisasContext *dc)
+static int dec_bound_r(CPUCRISState *env, DisasContext *dc)
{
TCGv l0;
int size = memsize_zz(dc);
return 2;
}
-static int dec_cmp_r(DisasContext *dc)
+static int dec_cmp_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_abs_r(DisasContext *dc)
+static int dec_abs_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
return 2;
}
-static int dec_add_r(DisasContext *dc)
+static int dec_add_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_addc_r(DisasContext *dc)
+static int dec_addc_r(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("addc $r%u, $r%u\n",
dc->op1, dc->op2);
return 2;
}
-static int dec_mcp_r(DisasContext *dc)
+static int dec_mcp_r(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("mcp $p%u, $r%u\n",
dc->op2, dc->op1);
}
#endif
-static int dec_swap_r(DisasContext *dc)
+static int dec_swap_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
#if DISAS_CRIS
return 2;
}
-static int dec_or_r(DisasContext *dc)
+static int dec_or_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_addi_r(DisasContext *dc)
+static int dec_addi_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
LOG_DIS("addi.%c $r%u, $r%u\n",
return 2;
}
-static int dec_addi_acr(DisasContext *dc)
+static int dec_addi_acr(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
return 2;
}
-static int dec_neg_r(DisasContext *dc)
+static int dec_neg_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
return 2;
}
-static int dec_btst_r(DisasContext *dc)
+static int dec_btst_r(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("btst $r%u, $r%u\n",
dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
cris_evaluate_flags(dc);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
cpu_R[dc->op1], cpu_PR[PR_CCS]);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
}
-static int dec_sub_r(DisasContext *dc)
+static int dec_sub_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int size = memsize_zz(dc);
}
/* Zero extension. From size to dword. */
-static int dec_movu_r(DisasContext *dc)
+static int dec_movu_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
}
/* Sign extension. From size to dword. */
-static int dec_movs_r(DisasContext *dc)
+static int dec_movs_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
}
/* zero extension. From size to dword. */
-static int dec_addu_r(DisasContext *dc)
+static int dec_addu_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
}
/* Sign extension. From size to dword. */
-static int dec_adds_r(DisasContext *dc)
+static int dec_adds_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
}
/* Zero extension. From size to dword. */
-static int dec_subu_r(DisasContext *dc)
+static int dec_subu_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
}
/* Sign extension. From size to dword. */
-static int dec_subs_r(DisasContext *dc)
+static int dec_subs_r(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
return 2;
}
-static int dec_setclrf(DisasContext *dc)
+static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
{
uint32_t flags;
int set = (~dc->opcode >> 2) & 1;
return 2;
}
-static int dec_move_rs(DisasContext *dc)
+static int dec_move_rs(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
cris_cc_mask(dc, 0);
- gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
+ gen_helper_movl_sreg_reg(cpu_env, tcg_const_tl(dc->op2),
+ tcg_const_tl(dc->op1));
return 2;
}
-static int dec_move_sr(DisasContext *dc)
+static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
cris_cc_mask(dc, 0);
- gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
+ gen_helper_movl_reg_sreg(cpu_env, tcg_const_tl(dc->op1),
+ tcg_const_tl(dc->op2));
return 2;
}
-static int dec_move_rp(DisasContext *dc)
+static int dec_move_rp(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
tcg_temp_free(t[0]);
return 2;
}
-static int dec_move_pr(DisasContext *dc)
+static int dec_move_pr(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
return 2;
}
-static int dec_move_mr(DisasContext *dc)
+static int dec_move_mr(CPUCRISState *env, DisasContext *dc)
{
int memsize = memsize_zz(dc);
int insn_len;
dc->op2);
if (memsize == 4) {
- insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
+ insn_len = dec_prep_move_m(env, dc, 0, 4, cpu_R[dc->op2]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_update_cc_op(dc, CC_OP_MOVE, 4);
cris_update_cc_x(dc);
TCGv t0;
t0 = tcg_temp_new();
- insn_len = dec_prep_move_m(dc, 0, memsize, t0);
+ insn_len = dec_prep_move_m(env, dc, 0, memsize, t0);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
tcg_temp_free(t[1]);
}
-static int dec_movs_m(DisasContext *dc)
+static int dec_movs_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
cris_alu_m_alloc_temps(t);
/* sign extend. */
- insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
return insn_len;
}
-static int dec_addu_m(DisasContext *dc)
+static int dec_addu_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
cris_alu_m_alloc_temps(t);
/* sign extend. */
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_ADD,
cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
return insn_len;
}
-static int dec_adds_m(DisasContext *dc)
+static int dec_adds_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
cris_alu_m_alloc_temps(t);
/* sign extend. */
- insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_subu_m(DisasContext *dc)
+static int dec_subu_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
cris_alu_m_alloc_temps(t);
/* sign extend. */
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_subs_m(DisasContext *dc)
+static int dec_subs_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
cris_alu_m_alloc_temps(t);
/* sign extend. */
- insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_movu_m(DisasContext *dc)
+static int dec_movu_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_cmpu_m(DisasContext *dc)
+static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_cmps_m(DisasContext *dc)
+static int dec_cmps_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_z(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_CMP,
cpu_R[dc->op2], cpu_R[dc->op2], t[1],
return insn_len;
}
-static int dec_cmp_m(DisasContext *dc)
+static int dec_cmp_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_CMP,
cpu_R[dc->op2], cpu_R[dc->op2], t[1],
return insn_len;
}
-static int dec_test_m(DisasContext *dc)
+static int dec_test_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
cris_evaluate_flags(dc);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZ);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
return insn_len;
}
-static int dec_and_m(DisasContext *dc)
+static int dec_and_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_add_m(DisasContext *dc)
+static int dec_add_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_ADD,
cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
return insn_len;
}
-static int dec_addo_m(DisasContext *dc)
+static int dec_addo_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
cris_cc_mask(dc, 0);
cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_bound_m(DisasContext *dc)
+static int dec_bound_m(CPUCRISState *env, DisasContext *dc)
{
TCGv l[2];
int memsize = memsize_zz(dc);
l[0] = tcg_temp_local_new();
l[1] = tcg_temp_local_new();
- insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_addc_mr(DisasContext *dc)
+static int dec_addc_mr(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int insn_len = 2;
dc->flags_x = X_FLAG;
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, 4, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
do_postinc(dc, 4);
return insn_len;
}
-static int dec_sub_m(DisasContext *dc)
+static int dec_sub_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2, dc->ir, dc->zzsize);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
do_postinc(dc, memsize);
return insn_len;
}
-static int dec_or_m(DisasContext *dc)
+static int dec_or_m(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2, dc->pc);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_OR,
cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
return insn_len;
}
-static int dec_move_mp(DisasContext *dc)
+static int dec_move_mp(CPUCRISState *env, DisasContext *dc)
{
TCGv t[2];
int memsize = memsize_zz(dc);
dc->op2);
cris_alu_m_alloc_temps(t);
- insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
cris_cc_mask(dc, 0);
if (dc->op2 == PR_CCS) {
cris_evaluate_flags(dc);
return insn_len;
}
-static int dec_move_pm(DisasContext *dc)
+static int dec_move_pm(CPUCRISState *env, DisasContext *dc)
{
TCGv t0;
int memsize;
return 2;
}
-static int dec_movem_mr(DisasContext *dc)
+static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
{
TCGv_i64 tmp[16];
TCGv tmp32;
return 2;
}
-static int dec_movem_rm(DisasContext *dc)
+static int dec_movem_rm(CPUCRISState *env, DisasContext *dc)
{
TCGv tmp;
TCGv addr;
return 2;
}
-static int dec_move_rm(DisasContext *dc)
+static int dec_move_rm(CPUCRISState *env, DisasContext *dc)
{
int memsize;
return 2;
}
-static int dec_lapcq(DisasContext *dc)
+static int dec_lapcq(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("lapcq %x, $r%u\n",
dc->pc + dc->op1*2, dc->op2);
return 2;
}
-static int dec_lapc_im(DisasContext *dc)
+static int dec_lapc_im(CPUCRISState *env, DisasContext *dc)
{
unsigned int rd;
int32_t imm;
rd = dc->op2;
cris_cc_mask(dc, 0);
- imm = cris_fetch(dc, dc->pc + 2, 4, 0);
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
pc = dc->pc;
}
/* Jump to special reg. */
-static int dec_jump_p(DisasContext *dc)
+static int dec_jump_p(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("jump $p%u\n", dc->op2);
}
/* Jump and save. */
-static int dec_jas_r(DisasContext *dc)
+static int dec_jas_r(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
cris_cc_mask(dc, 0);
return 2;
}
-static int dec_jas_im(DisasContext *dc)
+static int dec_jas_im(CPUCRISState *env, DisasContext *dc)
{
uint32_t imm;
- imm = cris_fetch(dc, dc->pc + 2, 4, 0);
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
LOG_DIS("jas 0x%x\n", imm);
cris_cc_mask(dc, 0);
return 6;
}
-static int dec_jasc_im(DisasContext *dc)
+static int dec_jasc_im(CPUCRISState *env, DisasContext *dc)
{
uint32_t imm;
- imm = cris_fetch(dc, dc->pc + 2, 4, 0);
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
LOG_DIS("jasc 0x%x\n", imm);
cris_cc_mask(dc, 0);
return 6;
}
-static int dec_jasc_r(DisasContext *dc)
+static int dec_jasc_r(CPUCRISState *env, DisasContext *dc)
{
LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
cris_cc_mask(dc, 0);
return 2;
}
-static int dec_bcc_im(DisasContext *dc)
+static int dec_bcc_im(CPUCRISState *env, DisasContext *dc)
{
int32_t offset;
uint32_t cond = dc->op2;
- offset = cris_fetch(dc, dc->pc + 2, 2, 1);
+ offset = cris_fetch(env, dc, dc->pc + 2, 2, 1);
LOG_DIS("b%s %d pc=%x dst=%x\n",
cc_name(cond), offset,
return 4;
}
-static int dec_bas_im(DisasContext *dc)
+static int dec_bas_im(CPUCRISState *env, DisasContext *dc)
{
int32_t simm;
- simm = cris_fetch(dc, dc->pc + 2, 4, 0);
+ simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
cris_cc_mask(dc, 0);
return 6;
}
-static int dec_basc_im(DisasContext *dc)
+static int dec_basc_im(CPUCRISState *env, DisasContext *dc)
{
int32_t simm;
- simm = cris_fetch(dc, dc->pc + 2, 4, 0);
+ simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
cris_cc_mask(dc, 0);
return 6;
}
-static int dec_rfe_etc(DisasContext *dc)
+static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
{
cris_cc_mask(dc, 0);
/* rfe. */
LOG_DIS("rfe\n");
cris_evaluate_flags(dc);
- gen_helper_rfe();
+ gen_helper_rfe(cpu_env);
dc->is_jmp = DISAS_UPDATE;
break;
case 5:
/* rfn. */
LOG_DIS("rfn\n");
cris_evaluate_flags(dc);
- gen_helper_rfn();
+ gen_helper_rfn(cpu_env);
dc->is_jmp = DISAS_UPDATE;
break;
case 6:
return 2;
}
-static int dec_ftag_fidx_d_m(DisasContext *dc)
+static int dec_ftag_fidx_d_m(CPUCRISState *env, DisasContext *dc)
{
return 2;
}
-static int dec_ftag_fidx_i_m(DisasContext *dc)
+static int dec_ftag_fidx_i_m(CPUCRISState *env, DisasContext *dc)
{
return 2;
}
-static int dec_null(DisasContext *dc)
+static int dec_null(CPUCRISState *env, DisasContext *dc)
{
printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
dc->pc, dc->opcode, dc->op1, dc->op2);
uint32_t bits;
uint32_t mask;
};
- int (*dec)(DisasContext *dc);
+ int (*dec)(CPUCRISState *env, DisasContext *dc);
} decinfo[] = {
/* Order matters here. */
{DEC_MOVEQ, dec_moveq},
{{0, 0}, dec_null}
};
-static unsigned int crisv32_decoder(DisasContext *dc)
+static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
{
int insn_len = 2;
int i;
tcg_gen_debug_insn_start(dc->pc);
/* Load a halfword onto the instruction register. */
- dc->ir = cris_fetch(dc, dc->pc, 2, 0);
+ dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
/* Now decode it. */
dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
{
- insn_len = decinfo[i].dec(dc);
+ insn_len = decinfo[i].dec(env, dc);
break;
}
}
gen_io_start();
dc->clear_x = 1;
- insn_len = dc->decoder(dc);
+ insn_len = dc->decoder(env, dc);
dc->ppc = dc->pc;
dc->pc += insn_len;
if (dc->clear_x)
return insn_len;
}
-static int dec10_prep_move_m(DisasContext *dc, int s_ext, int memsize,
- TCGv dst)
+static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst)
{
unsigned int rs;
uint32_t imm;
if (memsize != 4) {
if (s_ext) {
if (memsize == 1)
- imm = ldsb_code(dc->pc + 2);
+ imm = cpu_ldsb_code(env, dc->pc + 2);
else
- imm = ldsw_code(dc->pc + 2);
+ imm = cpu_ldsw_code(env, dc->pc + 2);
} else {
if (memsize == 1)
- imm = ldub_code(dc->pc + 2);
+ imm = cpu_ldub_code(env, dc->pc + 2);
else
- imm = lduw_code(dc->pc + 2);
+ imm = cpu_lduw_code(env, dc->pc + 2);
}
} else
- imm = ldl_code(dc->pc + 2);
+ imm = cpu_ldl_code(env, dc->pc + 2);
tcg_gen_movi_tl(dst, imm);
} else {
/* BTST */
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->dst],
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
tcg_const_tl(imm), cpu_PR[PR_CCS]);
}
break;
LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->dst],
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
cpu_R[dc->src], cpu_PR[PR_CCS]);
break;
case CRISV10_REG_DSTEP:
return insn_len;
}
-static unsigned int dec10_ind_move_m_r(DisasContext *dc, unsigned int size)
+static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc,
+ unsigned int size)
{
unsigned int insn_len = 2;
TCGv t;
cris_cc_mask(dc, CC_MASK_NZVC);
t = tcg_temp_new();
- insn_len += dec10_prep_move_m(dc, 0, size, t);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, size);
if (dc->dst == 15) {
tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
return insn_len;
}
-static unsigned int dec10_ind_move_m_pr(DisasContext *dc)
+static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc)
{
unsigned int insn_len = 2, rd = dc->dst;
TCGv t, addr;
addr = tcg_temp_new();
t = tcg_temp_new();
- insn_len += dec10_prep_move_m(dc, 0, 4, t);
+ insn_len += dec10_prep_move_m(env, dc, 0, 4, t);
if (rd == 15) {
tcg_gen_mov_tl(env_btarget, t);
cris_prepare_jmp(dc, JMP_INDIRECT);
tcg_temp_free(t0);
}
-static int dec10_ind_alu(DisasContext *dc, int op, unsigned int size)
+static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc,
+ int op, unsigned int size)
{
int insn_len = 0;
int rd = dc->dst;
TCGv t[2];
cris_alu_m_alloc_temps(t);
- insn_len += dec10_prep_move_m(dc, 0, size, t[0]);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t[0], size);
if (dc->dst == 15) {
tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
return insn_len;
}
-static int dec10_ind_bound(DisasContext *dc, unsigned int size)
+static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc,
+ unsigned int size)
{
int insn_len = 0;
int rd = dc->dst;
TCGv t;
t = tcg_temp_local_new();
- insn_len += dec10_prep_move_m(dc, 0, size, t);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t);
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4);
if (dc->dst == 15) {
tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
return insn_len;
}
-static int dec10_alux_m(DisasContext *dc, int op)
+static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op)
{
unsigned int size = (dc->size & 1) ? 2 : 1;
unsigned int sx = !!(dc->size & 2);
t = tcg_temp_new();
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_prep_move_m(dc, sx, size, t);
+ insn_len += dec10_prep_move_m(env, dc, sx, size, t);
cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t, 4);
if (dc->dst == 15) {
tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
return insn_len;
}
-static int dec10_dip(DisasContext *dc)
+static int dec10_dip(CPUCRISState *env, DisasContext *dc)
{
int insn_len = 2;
uint32_t imm;
LOG_DIS("dip pc=%x opcode=%d r%d r%d\n",
dc->pc, dc->opcode, dc->src, dc->dst);
if (dc->src == 15) {
- imm = ldl_code(dc->pc + 2);
+ imm = cpu_ldl_code(env, dc->pc + 2);
tcg_gen_movi_tl(cpu_PR[PR_PREFIX], imm);
if (dc->postinc)
insn_len += 4;
return insn_len;
}
-static int dec10_bdap_m(DisasContext *dc, int size)
+static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size)
{
int insn_len = 2;
int rd = dc->dst;
}
#endif
/* Now the rest of the modes are truly indirect. */
- insn_len += dec10_prep_move_m(dc, 1, size, cpu_PR[PR_PREFIX]);
+ insn_len += dec10_prep_move_m(env, dc, 1, size, cpu_PR[PR_PREFIX]);
tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]);
cris_set_prefix(dc);
return insn_len;
}
-static unsigned int dec10_ind(DisasContext *dc)
+static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
{
unsigned int insn_len = 2;
unsigned int size = dec10_size(dc->size);
if (dc->size != 3) {
switch (dc->opcode) {
case CRISV10_IND_MOVE_M_R:
- return dec10_ind_move_m_r(dc, size);
+ return dec10_ind_move_m_r(env, dc, size);
break;
case CRISV10_IND_MOVE_R_M:
return dec10_ind_move_r_m(dc, size);
case CRISV10_IND_CMP:
LOG_DIS("cmp size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(dc, CC_OP_CMP, size);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_CMP, size);
break;
case CRISV10_IND_TEST:
LOG_DIS("test size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_evaluate_flags(dc);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu_m_alloc_temps(t);
- insn_len += dec10_prep_move_m(dc, 0, size, t[0]);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
t[0], tcg_const_tl(0), size);
case CRISV10_IND_ADD:
LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(dc, CC_OP_ADD, size);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_ADD, size);
break;
case CRISV10_IND_SUB:
LOG_DIS("sub size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(dc, CC_OP_SUB, size);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_SUB, size);
break;
case CRISV10_IND_BOUND:
LOG_DIS("bound size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_bound(dc, size);
+ insn_len += dec10_ind_bound(env, dc, size);
break;
case CRISV10_IND_AND:
LOG_DIS("and size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(dc, CC_OP_AND, size);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_AND, size);
break;
case CRISV10_IND_OR:
LOG_DIS("or size=%d op=%d %d\n", size, dc->src, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- insn_len += dec10_ind_alu(dc, CC_OP_OR, size);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_OR, size);
break;
case CRISV10_IND_MOVX:
- insn_len = dec10_alux_m(dc, CC_OP_MOVE);
+ insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
break;
case CRISV10_IND_ADDX:
- insn_len = dec10_alux_m(dc, CC_OP_ADD);
+ insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
break;
case CRISV10_IND_SUBX:
- insn_len = dec10_alux_m(dc, CC_OP_SUB);
+ insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
break;
case CRISV10_IND_CMPX:
- insn_len = dec10_alux_m(dc, CC_OP_CMP);
+ insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
break;
case CRISV10_IND_MUL:
/* This is a reg insn coded in the mem indir space. */
dec10_reg_mul(dc, size, dc->ir & (1 << 10));
break;
case CRISV10_IND_BDAP_M:
- insn_len = dec10_bdap_m(dc, size);
+ insn_len = dec10_bdap_m(env, dc, size);
break;
default:
LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n",
switch (dc->opcode) {
case CRISV10_IND_MOVE_M_SPR:
- insn_len = dec10_ind_move_m_pr(dc);
+ insn_len = dec10_ind_move_m_pr(env, dc);
break;
case CRISV10_IND_MOVE_SPR_M:
insn_len = dec10_ind_move_pr_m(dc);
if (dc->src == 15) {
LOG_DIS("jump.%d %d r%d r%d direct\n", size,
dc->opcode, dc->src, dc->dst);
- imm = ldl_code(dc->pc + 2);
+ imm = cpu_ldl_code(env, dc->pc + 2);
if (dc->mode == CRISV10_MODE_AUTOINC)
insn_len += size;
dc->delayed_branch--; /* v10 has no dslot here. */
break;
case CRISV10_IND_MOVX:
- insn_len = dec10_alux_m(dc, CC_OP_MOVE);
+ insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
break;
case CRISV10_IND_ADDX:
- insn_len = dec10_alux_m(dc, CC_OP_ADD);
+ insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
break;
case CRISV10_IND_SUBX:
- insn_len = dec10_alux_m(dc, CC_OP_SUB);
+ insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
break;
case CRISV10_IND_CMPX:
- insn_len = dec10_alux_m(dc, CC_OP_CMP);
+ insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
break;
case CRISV10_IND_DIP:
- insn_len = dec10_dip(dc);
+ insn_len = dec10_dip(env, dc);
break;
case CRISV10_IND_BCC_M:
cris_cc_mask(dc, 0);
- imm = ldsw_code(dc->pc + 2);
+ imm = cpu_ldsw_code(env, dc->pc + 2);
simm = (int16_t)imm;
simm += 4;
return insn_len;
}
-static unsigned int crisv10_decoder(DisasContext *dc)
+static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
{
unsigned int insn_len = 2;
tcg_gen_debug_insn_start(dc->pc);
/* Load a halfword onto the instruction register. */
- dc->ir = lduw_code(dc->pc);
+ dc->ir = cpu_lduw_code(env, dc->pc);
/* Now decode it. */
dc->opcode = EXTRACT_FIELD(dc->ir, 6, 9);
break;
case CRISV10_MODE_AUTOINC:
case CRISV10_MODE_INDIRECT:
- insn_len = dec10_ind(dc);
+ insn_len = dec10_ind(env, dc);
break;
}
obj-y += translate.o op_helper.o helper.o cpu.o
obj-$(CONFIG_SOFTMMU) += machine.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
#include "def-helper.h"
-DEF_HELPER_1(raise_exception, void, i32)
-DEF_HELPER_0(hlt, void)
-DEF_HELPER_1(wcsr_im, void, i32)
-DEF_HELPER_1(wcsr_ip, void, i32)
-DEF_HELPER_1(wcsr_jtx, void, i32)
-DEF_HELPER_1(wcsr_jrx, void, i32)
-DEF_HELPER_0(rcsr_im, i32)
-DEF_HELPER_0(rcsr_ip, i32)
-DEF_HELPER_0(rcsr_jtx, i32)
-DEF_HELPER_0(rcsr_jrx, i32)
+DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_1(hlt, void, env)
+DEF_HELPER_2(wcsr_im, void, env, i32)
+DEF_HELPER_2(wcsr_ip, void, env, i32)
+DEF_HELPER_2(wcsr_jtx, void, env, i32)
+DEF_HELPER_2(wcsr_jrx, void, env, i32)
+DEF_HELPER_1(rcsr_im, i32, env)
+DEF_HELPER_1(rcsr_ip, i32, env)
+DEF_HELPER_1(rcsr_jtx, i32, env)
+DEF_HELPER_1(rcsr_jrx, i32, env)
#include "def-helper.h"
#include <assert.h>
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helper.h"
#include "host-utils.h"
#define SHIFT 3
#include "softmmu_template.h"
-void helper_raise_exception(uint32_t index)
+void helper_raise_exception(CPULM32State *env, uint32_t index)
{
env->exception_index = index;
cpu_loop_exit(env);
}
-void helper_hlt(void)
+void helper_hlt(CPULM32State *env)
{
env->halted = 1;
env->exception_index = EXCP_HLT;
cpu_loop_exit(env);
}
-void helper_wcsr_im(uint32_t im)
+void helper_wcsr_im(CPULM32State *env, uint32_t im)
{
lm32_pic_set_im(env->pic_state, im);
}
-void helper_wcsr_ip(uint32_t im)
+void helper_wcsr_ip(CPULM32State *env, uint32_t im)
{
lm32_pic_set_ip(env->pic_state, im);
}
-void helper_wcsr_jtx(uint32_t jtx)
+void helper_wcsr_jtx(CPULM32State *env, uint32_t jtx)
{
lm32_juart_set_jtx(env->juart_state, jtx);
}
-void helper_wcsr_jrx(uint32_t jrx)
+void helper_wcsr_jrx(CPULM32State *env, uint32_t jrx)
{
lm32_juart_set_jrx(env->juart_state, jrx);
}
-uint32_t helper_rcsr_im(void)
+uint32_t helper_rcsr_im(CPULM32State *env)
{
return lm32_pic_get_im(env->pic_state);
}
-uint32_t helper_rcsr_ip(void)
+uint32_t helper_rcsr_ip(CPULM32State *env)
{
return lm32_pic_get_ip(env->pic_state);
}
-uint32_t helper_rcsr_jtx(void)
+uint32_t helper_rcsr_jtx(CPULM32State *env)
{
return lm32_juart_get_jtx(env->juart_state);
}
-uint32_t helper_rcsr_jrx(void)
+uint32_t helper_rcsr_jrx(CPULM32State *env)
{
return lm32_juart_get_jrx(env->juart_state);
}
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPULM32State *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPULM32State *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
TranslationBlock *tb;
- CPULM32State *saved_env;
int ret;
- saved_env = env;
- env = env1;
-
ret = cpu_lm32_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
}
cpu_loop_exit(env);
}
- env = saved_env;
}
#endif
{
TCGv_i32 tmp = tcg_const_i32(index);
- gen_helper_raise_exception(tmp);
+ gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
}
} else {
if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- gen_helper_hlt();
+ gen_helper_hlt(cpu_env);
} else {
tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}
tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
break;
case CSR_IM:
- gen_helper_rcsr_im(cpu_R[dc->r2]);
+ gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
break;
case CSR_IP:
- gen_helper_rcsr_ip(cpu_R[dc->r2]);
+ gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
break;
case CSR_CC:
tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
break;
case CSR_JTX:
- gen_helper_rcsr_jtx(cpu_R[dc->r2]);
+ gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
break;
case CSR_JRX:
- gen_helper_rcsr_jrx(cpu_R[dc->r2]);
+ gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
break;
case CSR_ICC:
case CSR_DCC:
if (use_icount) {
gen_io_start();
}
- gen_helper_wcsr_im(cpu_R[dc->r1]);
+ gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
if (use_icount) {
gen_io_end();
if (use_icount) {
gen_io_start();
}
- gen_helper_wcsr_ip(cpu_R[dc->r1]);
+ gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
if (use_icount) {
gen_io_end();
tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
break;
case CSR_JTX:
- gen_helper_wcsr_jtx(cpu_R[dc->r1]);
+ gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
break;
case CSR_JRX:
- gen_helper_wcsr_jrx(cpu_R[dc->r1]);
+ gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
break;
case CSR_DC:
tcg_gen_mov_tl(cpu_dc, cpu_R[dc->r1]);
dec_cmpne
};
-static inline void decode(DisasContext *dc)
+static inline void decode(DisasContext *dc, uint32_t ir)
{
- uint32_t ir;
-
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
tcg_gen_debug_insn_start(dc->pc);
}
- dc->ir = ir = ldl_code(dc->pc);
+ dc->ir = ir;
LOG_DIS("%8.8x\t", dc->ir);
/* try guessing 'empty' instruction memory, although it may be a valid
gen_io_start();
}
- decode(dc);
+ decode(dc, cpu_ldl_code(env, dc->pc));
dc->pc += 4;
num_insns++;
obj-y += m68k-semi.o
obj-y += translate.o op_helper.o helper.o cpu.o
obj-$(CONFIG_SOFTMMU) += machine.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
DEF_HELPER_3(set_mac_extu, void, env, i32, i32)
DEF_HELPER_2(flush_flags, void, env, i32)
-DEF_HELPER_1(raise_exception, void, i32)
+DEF_HELPER_2(raise_exception, void, env, i32)
#include "def-helper.h"
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helpers.h"
#if defined(CONFIG_USER_ONLY)
-void do_interrupt(CPUM68KState *env1)
+void do_interrupt(CPUM68KState *env)
{
- env1->exception_index = -1;
+ env->exception_index = -1;
}
-void do_interrupt_m68k_hardirq(CPUM68KState *env1)
+void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
}
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUM68KState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUM68KState *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
TranslationBlock *tb;
- CPUM68KState *saved_env;
int ret;
- saved_env = env;
- env = env1;
ret = cpu_m68k_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
}
cpu_loop_exit(env);
}
- env = saved_env;
}
-static void do_rte(void)
+static void do_rte(CPUM68KState *env)
{
uint32_t sp;
uint32_t fmt;
sp = env->aregs[7];
- fmt = ldl_kernel(sp);
- env->pc = ldl_kernel(sp + 4);
+ fmt = cpu_ldl_kernel(env, sp);
+ env->pc = cpu_ldl_kernel(env, sp + 4);
sp |= (fmt >> 28) & 3;
env->sr = fmt & 0xffff;
m68k_switch_sp(env);
env->aregs[7] = sp + 8;
}
-static void do_interrupt_all(int is_hw)
+static void do_interrupt_all(CPUM68KState *env, int is_hw)
{
uint32_t sp;
uint32_t fmt;
switch (env->exception_index) {
case EXCP_RTE:
/* Return from an exception. */
- do_rte();
+ do_rte(env);
return;
case EXCP_HALT_INSN:
if (semihosting_enabled
&& (env->sr & SR_S) != 0
&& (env->pc & 3) == 0
- && lduw_code(env->pc - 4) == 0x4e71
- && ldl_code(env->pc) == 0x4e7bf000) {
+ && cpu_lduw_code(env, env->pc - 4) == 0x4e71
+ && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
env->pc += 4;
do_m68k_semihosting(env, env->dregs[0]);
return;
/* ??? This could cause MMU faults. */
sp &= ~3;
sp -= 4;
- stl_kernel(sp, retaddr);
+ cpu_stl_kernel(env, sp, retaddr);
sp -= 4;
- stl_kernel(sp, fmt);
+ cpu_stl_kernel(env, sp, fmt);
env->aregs[7] = sp;
/* Jump to vector. */
- env->pc = ldl_kernel(env->vbr + vector);
+ env->pc = cpu_ldl_kernel(env, env->vbr + vector);
}
-void do_interrupt(CPUM68KState *env1)
+void do_interrupt(CPUM68KState *env)
{
- CPUM68KState *saved_env;
-
- saved_env = env;
- env = env1;
- do_interrupt_all(0);
- env = saved_env;
+ do_interrupt_all(env, 0);
}
-void do_interrupt_m68k_hardirq(CPUM68KState *env1)
+void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
- CPUM68KState *saved_env;
-
- saved_env = env;
- env = env1;
- do_interrupt_all(1);
- env = saved_env;
+ do_interrupt_all(env, 1);
}
#endif
-static void raise_exception(int tt)
+static void raise_exception(CPUM68KState *env, int tt)
{
env->exception_index = tt;
cpu_loop_exit(env);
}
-void HELPER(raise_exception)(uint32_t tt)
+void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
{
- raise_exception(tt);
+ raise_exception(env, tt);
}
void HELPER(divu)(CPUM68KState *env, uint32_t word)
num = env->div1;
den = env->div2;
/* ??? This needs to make sure the throwing location is accurate. */
- if (den == 0)
- raise_exception(EXCP_DIV0);
+ if (den == 0) {
+ raise_exception(env, EXCP_DIV0);
+ }
quot = num / den;
rem = num % den;
flags = 0;
- /* Avoid using a PARAM1 of zero. This breaks dyngen because it uses
- the address of a symbol, and gcc knows symbols can't have address
- zero. */
if (word && quot > 0xffff)
flags |= CCF_V;
if (quot == 0)
num = env->div1;
den = env->div2;
- if (den == 0)
- raise_exception(EXCP_DIV0);
+ if (den == 0) {
+ raise_exception(env, EXCP_DIV0);
+ }
quot = num / den;
rem = num % den;
flags = 0;
#define OS_SINGLE 4
#define OS_DOUBLE 5
-typedef void (*disas_proc)(DisasContext *, uint16_t);
+typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
#ifdef DEBUG_DISPATCH
-#define DISAS_INSN(name) \
- static void real_disas_##name (DisasContext *s, uint16_t insn); \
- static void disas_##name (DisasContext *s, uint16_t insn) { \
- qemu_log("Dispatch " #name "\n"); \
- real_disas_##name(s, insn); } \
- static void real_disas_##name (DisasContext *s, uint16_t insn)
+#define DISAS_INSN(name) \
+ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn); \
+ static void disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn) \
+ { \
+ qemu_log("Dispatch " #name "\n"); \
+ real_disas_##name(env, s, insn); \
+ } \
+ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn)
#else
-#define DISAS_INSN(name) \
- static void disas_##name (DisasContext *s, uint16_t insn)
+#define DISAS_INSN(name) \
+ static void disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn)
#endif
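For reference (not part of the patch), with DEBUG_DISPATCH defined, DISAS_INSN(undef) now expands to approximately:

/* Approximate expansion of DISAS_INSN(undef) under DEBUG_DISPATCH. */
static void real_disas_undef(CPUM68KState *env, DisasContext *s, uint16_t insn);
static void disas_undef(CPUM68KState *env, DisasContext *s, uint16_t insn)
{
    qemu_log("Dispatch undef\n");
    real_disas_undef(env, s, insn);   /* forward env, s, insn in declaration order */
}
static void real_disas_undef(CPUM68KState *env, DisasContext *s, uint16_t insn)
/* { ...handler body written after the macro invocation... } */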
/* Generate a load from the specified address. Narrow values are
}
/* Read a 32-bit immediate constant. */
-static inline uint32_t read_im32(DisasContext *s)
+static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
{
uint32_t im;
- im = ((uint32_t)lduw_code(s->pc)) << 16;
+ im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
s->pc += 2;
- im |= lduw_code(s->pc);
+ im |= cpu_lduw_code(env, s->pc);
s->pc += 2;
return im;
}
/* Handle a base + index + displacement effective address.
A NULL_QREG base means pc-relative. */
-static TCGv gen_lea_indexed(DisasContext *s, int opsize, TCGv base)
+static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize,
+ TCGv base)
{
uint32_t offset;
uint16_t ext;
uint32_t bd, od;
offset = s->pc;
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
if ((ext & 0x30) > 0x10) {
/* base displacement */
if ((ext & 0x30) == 0x20) {
- bd = (int16_t)lduw_code(s->pc);
+ bd = (int16_t)cpu_lduw_code(env, s->pc);
s->pc += 2;
} else {
- bd = read_im32(s);
+ bd = read_im32(env, s);
}
} else {
bd = 0;
if ((ext & 3) > 1) {
/* outer displacement */
if ((ext & 3) == 2) {
- od = (int16_t)lduw_code(s->pc);
+ od = (int16_t)cpu_lduw_code(env, s->pc);
s->pc += 2;
} else {
- od = read_im32(s);
+ od = read_im32(env, s);
}
} else {
od = 0;
/* Generate code for an "effective address". Does not adjust the base
register for autoincrement addressing modes. */
-static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize)
+static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize)
{
TCGv reg;
TCGv tmp;
case 5: /* Indirect displacement. */
reg = AREG(insn, 0);
tmp = tcg_temp_new();
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
return tmp;
case 6: /* Indirect index + displacement. */
reg = AREG(insn, 0);
- return gen_lea_indexed(s, opsize, reg);
+ return gen_lea_indexed(env, s, opsize, reg);
case 7: /* Other */
switch (insn & 7) {
case 0: /* Absolute short. */
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
return tcg_const_i32(offset);
case 1: /* Absolute long. */
- offset = read_im32(s);
+ offset = read_im32(env, s);
return tcg_const_i32(offset);
case 2: /* pc displacement */
offset = s->pc;
- offset += ldsw_code(s->pc);
+ offset += cpu_ldsw_code(env, s->pc);
s->pc += 2;
return tcg_const_i32(offset);
case 3: /* pc index+displacement. */
- return gen_lea_indexed(s, opsize, NULL_QREG);
+ return gen_lea_indexed(env, s, opsize, NULL_QREG);
case 4: /* Immediate. */
default:
return NULL_QREG;
/* Helper function for gen_ea. Reuse the computed address between the
read/write operands. */
-static inline TCGv gen_ea_once(DisasContext *s, uint16_t insn, int opsize,
- TCGv val, TCGv *addrp, ea_what what)
+static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
+ uint16_t insn, int opsize, TCGv val,
+ TCGv *addrp, ea_what what)
{
TCGv tmp;
if (addrp && what == EA_STORE) {
tmp = *addrp;
} else {
- tmp = gen_lea(s, insn, opsize);
+ tmp = gen_lea(env, s, insn, opsize);
if (IS_NULL_QREG(tmp))
return tmp;
if (addrp)
/* Generate code to load/store a value into/from an EA. If VAL > 0 this is
a write otherwise it is a read (0 == sign extend, -1 == zero extend).
ADDRP is non-null for readwrite operands. */
-static TCGv gen_ea(DisasContext *s, uint16_t insn, int opsize, TCGv val,
- TCGv *addrp, ea_what what)
+static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize, TCGv val, TCGv *addrp, ea_what what)
{
TCGv reg;
TCGv result;
if (addrp && what == EA_STORE) {
tmp = *addrp;
} else {
- tmp = gen_lea(s, insn, opsize);
+ tmp = gen_lea(env, s, insn, opsize);
if (IS_NULL_QREG(tmp))
return tmp;
if (addrp)
return result;
case 5: /* Indirect displacement. */
case 6: /* Indirect index + displacement. */
- return gen_ea_once(s, insn, opsize, val, addrp, what);
+ return gen_ea_once(env, s, insn, opsize, val, addrp, what);
case 7: /* Other */
switch (insn & 7) {
case 0: /* Absolute short. */
case 1: /* Absolute long. */
case 2: /* pc displacement */
case 3: /* pc index+displacement. */
- return gen_ea_once(s, insn, opsize, val, addrp, what);
+ return gen_ea_once(env, s, insn, opsize, val, addrp, what);
case 4: /* Immediate. */
/* Sign extend values for consistency. */
switch (opsize) {
case OS_BYTE:
- if (what == EA_LOADS)
- offset = ldsb_code(s->pc + 1);
- else
- offset = ldub_code(s->pc + 1);
+ if (what == EA_LOADS) {
+ offset = cpu_ldsb_code(env, s->pc + 1);
+ } else {
+ offset = cpu_ldub_code(env, s->pc + 1);
+ }
s->pc += 2;
break;
case OS_WORD:
- if (what == EA_LOADS)
- offset = ldsw_code(s->pc);
- else
- offset = lduw_code(s->pc);
+ if (what == EA_LOADS) {
+ offset = cpu_ldsw_code(env, s->pc);
+ } else {
+ offset = cpu_lduw_code(env, s->pc);
+ }
s->pc += 2;
break;
case OS_LONG:
- offset = read_im32(s);
+ offset = read_im32(env, s);
break;
default:
qemu_assert(0, "Bad immediate operand");
{
gen_flush_cc_op(s);
gen_jmp_im(s, where);
- gen_helper_raise_exception(tcg_const_i32(nr));
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
}
static inline void gen_addr_fault(DisasContext *s)
gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
-#define SRC_EA(result, opsize, op_sign, addrp) do { \
- result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
- if (IS_NULL_QREG(result)) { \
- gen_addr_fault(s); \
- return; \
- } \
+#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
+ result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
+ op_sign ? EA_LOADS : EA_LOADU); \
+ if (IS_NULL_QREG(result)) { \
+ gen_addr_fault(s); \
+ return; \
+ } \
} while (0)
-#define DEST_EA(insn, opsize, val, addrp) do { \
- TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
- if (IS_NULL_QREG(ea_result)) { \
- gen_addr_fault(s); \
- return; \
- } \
+#define DEST_EA(env, insn, opsize, val, addrp) do { \
+ TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
+ if (IS_NULL_QREG(ea_result)) { \
+ gen_addr_fault(s); \
+ return; \
+ } \
} while (0)
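With env threaded through both macros, a converted handler body ends up shaped like the sketch below (hypothetical instruction, mirroring the real handlers that follow):

/* Sketch only -- "example" is not a real m68k handler in this patch. */
DISAS_INSN(example)
{
    TCGv src, dest, addr;

    SRC_EA(env, src, OS_LONG, 0, &addr);        /* read the operand via its EA */
    dest = tcg_temp_new();
    tcg_gen_not_i32(dest, src);
    gen_logic_cc(s, dest);
    DEST_EA(env, insn, OS_LONG, dest, &addr);   /* write the result back */
}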
/* Generate a jump to an immediate address. */
DISAS_INSN(undef)
{
gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
- cpu_abort(cpu_single_env, "Illegal instruction: %04x @ %08x",
- insn, s->pc - 2);
+ cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
DISAS_INSN(mulw)
tcg_gen_ext16s_i32(tmp, reg);
else
tcg_gen_ext16u_i32(tmp, reg);
- SRC_EA(src, OS_WORD, sign, NULL);
+ SRC_EA(env, src, OS_WORD, sign, NULL);
tcg_gen_mul_i32(tmp, tmp, src);
tcg_gen_mov_i32(reg, tmp);
/* Unlike m68k, coldfire always clears the overflow bit. */
} else {
tcg_gen_ext16u_i32(QREG_DIV1, reg);
}
- SRC_EA(src, OS_WORD, sign, NULL);
+ SRC_EA(env, src, OS_WORD, sign, NULL);
tcg_gen_mov_i32(QREG_DIV2, src);
if (sign) {
gen_helper_divs(cpu_env, tcg_const_i32(1));
TCGv reg;
uint16_t ext;
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
if (ext & 0x87f8) {
gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
num = DREG(ext, 12);
reg = DREG(ext, 0);
tcg_gen_mov_i32(QREG_DIV1, num);
- SRC_EA(den, OS_LONG, 0, NULL);
+ SRC_EA(env, den, OS_LONG, 0, NULL);
tcg_gen_mov_i32(QREG_DIV2, den);
if (ext & 0x0800) {
gen_helper_divs(cpu_env, tcg_const_i32(0));
reg = DREG(insn, 9);
dest = tcg_temp_new();
if (insn & 0x100) {
- SRC_EA(tmp, OS_LONG, 0, &addr);
+ SRC_EA(env, tmp, OS_LONG, 0, &addr);
src = reg;
} else {
tmp = reg;
- SRC_EA(src, OS_LONG, 0, NULL);
+ SRC_EA(env, src, OS_LONG, 0, NULL);
}
if (add) {
tcg_gen_add_i32(dest, tmp, src);
}
gen_update_cc_add(dest, src);
if (insn & 0x100) {
- DEST_EA(insn, OS_LONG, dest, &addr);
+ DEST_EA(env, insn, OS_LONG, dest, &addr);
} else {
tcg_gen_mov_i32(reg, dest);
}
else
opsize = OS_LONG;
op = (insn >> 6) & 3;
- SRC_EA(src1, opsize, 0, op ? &addr: NULL);
+ SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
src2 = DREG(insn, 9);
dest = tcg_temp_new();
break;
}
if (op)
- DEST_EA(insn, opsize, dest, &addr);
+ DEST_EA(env, insn, opsize, dest, &addr);
}
DISAS_INSN(sats)
TCGv tmp;
int is_load;
- mask = lduw_code(s->pc);
+ mask = cpu_lduw_code(env, s->pc);
s->pc += 2;
- tmp = gen_lea(s, insn, OS_LONG);
+ tmp = gen_lea(env, s, insn, OS_LONG);
if (IS_NULL_QREG(tmp)) {
gen_addr_fault(s);
return;
opsize = OS_LONG;
op = (insn >> 6) & 3;
- bitnum = lduw_code(s->pc);
+ bitnum = cpu_lduw_code(env, s->pc);
s->pc += 2;
if (bitnum & 0xff00) {
- disas_undef(s, insn);
+ disas_undef(env, s, insn);
return;
}
- SRC_EA(src1, opsize, 0, op ? &addr: NULL);
+ SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
gen_flush_flags(s);
if (opsize == OS_BYTE)
default: /* btst */
break;
}
- DEST_EA(insn, opsize, tmp, &addr);
+ DEST_EA(env, insn, opsize, tmp, &addr);
}
}
TCGv addr;
op = (insn >> 9) & 7;
- SRC_EA(src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
- im = read_im32(s);
+ SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
+ im = read_im32(env, s);
dest = tcg_temp_new();
switch (op) {
case 0: /* ori */
abort();
}
if (op != 6) {
- DEST_EA(insn, OS_LONG, dest, &addr);
+ DEST_EA(env, insn, OS_LONG, dest, &addr);
}
}
default:
abort();
}
- SRC_EA(src, opsize, 1, NULL);
+ SRC_EA(env, src, opsize, 1, NULL);
op = (insn >> 6) & 7;
if (op == 1) {
/* movea */
/* normal move */
uint16_t dest_ea;
dest_ea = ((insn >> 9) & 7) | (op << 3);
- DEST_EA(dest_ea, opsize, src, NULL);
+ DEST_EA(env, dest_ea, opsize, src, NULL);
/* This will be correct because loads sign extend. */
gen_logic_cc(s, src);
}
TCGv tmp;
reg = AREG(insn, 9);
- tmp = gen_lea(s, insn, OS_LONG);
+ tmp = gen_lea(env, s, insn, OS_LONG);
if (IS_NULL_QREG(tmp)) {
gen_addr_fault(s);
return;
default:
abort();
}
- DEST_EA(insn, opsize, tcg_const_i32(0), NULL);
+ DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
gen_logic_cc(s, tcg_const_i32(0));
}
}
}
-static void gen_set_sr(DisasContext *s, uint16_t insn, int ccr_only)
+static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int ccr_only)
{
TCGv tmp;
TCGv reg;
else if ((insn & 0x3f) == 0x3c)
{
uint16_t val;
- val = lduw_code(s->pc);
+ val = cpu_lduw_code(env, s->pc);
s->pc += 2;
gen_set_sr_im(s, val, ccr_only);
}
else
- disas_undef(s, insn);
+ disas_undef(env, s, insn);
}
DISAS_INSN(move_to_ccr)
{
- gen_set_sr(s, insn, 1);
+ gen_set_sr(env, s, insn, 1);
}
DISAS_INSN(not)
{
TCGv tmp;
- tmp = gen_lea(s, insn, OS_LONG);
+ tmp = gen_lea(env, s, insn, OS_LONG);
if (IS_NULL_QREG(tmp)) {
gen_addr_fault(s);
return;
default:
abort();
}
- SRC_EA(tmp, opsize, 1, NULL);
+ SRC_EA(env, tmp, opsize, 1, NULL);
gen_logic_cc(s, tmp);
}
TCGv addr;
dest = tcg_temp_new();
- SRC_EA(src1, OS_BYTE, 1, &addr);
+ SRC_EA(env, src1, OS_BYTE, 1, &addr);
gen_logic_cc(s, src1);
tcg_gen_ori_i32(dest, src1, 0x80);
- DEST_EA(insn, OS_BYTE, dest, &addr);
+ DEST_EA(env, insn, OS_BYTE, dest, &addr);
}
DISAS_INSN(mull)
/* The upper 32 bits of the product are discarded, so
muls.l and mulu.l are functionally equivalent. */
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
if (ext & 0x87ff) {
gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
return;
}
reg = DREG(ext, 12);
- SRC_EA(src1, OS_LONG, 0, NULL);
+ SRC_EA(env, src1, OS_LONG, 0, NULL);
dest = tcg_temp_new();
tcg_gen_mul_i32(dest, src1, reg);
tcg_gen_mov_i32(reg, dest);
TCGv reg;
TCGv tmp;
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
reg = AREG(insn, 0);
tmp = tcg_temp_new();
/* Load the target address first to ensure correct exception
behavior. */
- tmp = gen_lea(s, insn, OS_LONG);
+ tmp = gen_lea(env, s, insn, OS_LONG);
if (IS_NULL_QREG(tmp)) {
gen_addr_fault(s);
return;
int val;
TCGv addr;
- SRC_EA(src1, OS_LONG, 0, &addr);
+ SRC_EA(env, src1, OS_LONG, 0, &addr);
val = (insn >> 9) & 7;
if (val == 0)
val = 8;
}
gen_update_cc_add(dest, src2);
}
- DEST_EA(insn, OS_LONG, dest, &addr);
+ DEST_EA(env, insn, OS_LONG, dest, &addr);
}
DISAS_INSN(tpf)
case 4: /* No extension words. */
break;
default:
- disas_undef(s, insn);
+ disas_undef(env, s, insn);
}
}
op = (insn >> 8) & 0xf;
offset = (int8_t)insn;
if (offset == 0) {
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
} else if (offset == -1) {
- offset = read_im32(s);
+ offset = read_im32(env, s);
}
if (op == 1) {
/* bsr */
opsize = OS_WORD;
else
opsize = OS_BYTE;
- SRC_EA(src, opsize, (insn & 0x80) == 0, NULL);
+ SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
reg = DREG(insn, 9);
tcg_gen_mov_i32(reg, src);
gen_logic_cc(s, src);
reg = DREG(insn, 9);
dest = tcg_temp_new();
if (insn & 0x100) {
- SRC_EA(src, OS_LONG, 0, &addr);
+ SRC_EA(env, src, OS_LONG, 0, &addr);
tcg_gen_or_i32(dest, src, reg);
- DEST_EA(insn, OS_LONG, dest, &addr);
+ DEST_EA(env, insn, OS_LONG, dest, &addr);
} else {
- SRC_EA(src, OS_LONG, 0, NULL);
+ SRC_EA(env, src, OS_LONG, 0, NULL);
tcg_gen_or_i32(dest, src, reg);
tcg_gen_mov_i32(reg, dest);
}
TCGv src;
TCGv reg;
- SRC_EA(src, OS_LONG, 0, NULL);
+ SRC_EA(env, src, OS_LONG, 0, NULL);
reg = AREG(insn, 9);
tcg_gen_sub_i32(reg, reg, src);
}
val = -1;
src = tcg_const_i32(val);
gen_logic_cc(s, src);
- DEST_EA(insn, OS_LONG, src, NULL);
+ DEST_EA(env, insn, OS_LONG, src, NULL);
}
DISAS_INSN(cmp)
default:
abort();
}
- SRC_EA(src, opsize, 1, NULL);
+ SRC_EA(env, src, opsize, 1, NULL);
reg = DREG(insn, 9);
dest = tcg_temp_new();
tcg_gen_sub_i32(dest, reg, src);
} else {
opsize = OS_WORD;
}
- SRC_EA(src, opsize, 1, NULL);
+ SRC_EA(env, src, opsize, 1, NULL);
reg = AREG(insn, 9);
dest = tcg_temp_new();
tcg_gen_sub_i32(dest, reg, src);
TCGv dest;
TCGv addr;
- SRC_EA(src, OS_LONG, 0, &addr);
+ SRC_EA(env, src, OS_LONG, 0, &addr);
reg = DREG(insn, 9);
dest = tcg_temp_new();
tcg_gen_xor_i32(dest, src, reg);
gen_logic_cc(s, dest);
- DEST_EA(insn, OS_LONG, dest, &addr);
+ DEST_EA(env, insn, OS_LONG, dest, &addr);
}
DISAS_INSN(and)
reg = DREG(insn, 9);
dest = tcg_temp_new();
if (insn & 0x100) {
- SRC_EA(src, OS_LONG, 0, &addr);
+ SRC_EA(env, src, OS_LONG, 0, &addr);
tcg_gen_and_i32(dest, src, reg);
- DEST_EA(insn, OS_LONG, dest, &addr);
+ DEST_EA(env, insn, OS_LONG, dest, &addr);
} else {
- SRC_EA(src, OS_LONG, 0, NULL);
+ SRC_EA(env, src, OS_LONG, 0, NULL);
tcg_gen_and_i32(dest, src, reg);
tcg_gen_mov_i32(reg, dest);
}
TCGv src;
TCGv reg;
- SRC_EA(src, OS_LONG, 0, NULL);
+ SRC_EA(env, src, OS_LONG, 0, NULL);
reg = AREG(insn, 9);
tcg_gen_add_i32(reg, reg, src);
}
uint32_t addr;
addr = s->pc - 2;
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
if (ext != 0x46FC) {
gen_exception(s, addr, EXCP_UNSUPPORTED);
return;
}
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
if (IS_USER(s) || (ext & SR_S) == 0) {
gen_exception(s, addr, EXCP_PRIVILEGE);
gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
return;
}
- gen_set_sr(s, insn, 0);
+ gen_set_sr(env, s, insn, 0);
gen_lookup_tb(s);
}
return;
}
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
gen_set_sr_im(s, ext, 0);
return;
}
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
if (ext & 0x8000) {
int set_dest;
int opsize;
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
opmode = ext & 0x7f;
switch ((ext >> 13) & 7) {
tcg_gen_addi_i32(tmp32, tmp32, -8);
break;
case 5:
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
tcg_gen_addi_i32(tmp32, tmp32, offset);
break;
default:
goto undef;
}
- DEST_EA(insn, opsize, tmp32, NULL);
+ DEST_EA(env, insn, opsize, tmp32, NULL);
tcg_temp_free_i32(tmp32);
return;
case 4: /* fmove to control register. */
(ext >> 10) & 7);
goto undef;
}
- DEST_EA(insn, OS_LONG, tmp32, NULL);
+ DEST_EA(env, insn, OS_LONG, tmp32, NULL);
break;
case 6: /* fmovem */
case 7:
int i;
if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
goto undef;
- tmp32 = gen_lea(s, insn, OS_LONG);
+ tmp32 = gen_lea(env, s, insn, OS_LONG);
if (IS_NULL_QREG(tmp32)) {
gen_addr_fault(s);
return;
tcg_gen_addi_i32(tmp32, tmp32, -8);
break;
case 5:
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
tcg_gen_addi_i32(tmp32, tmp32, offset);
break;
case 7:
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
offset += s->pc - 2;
s->pc += 2;
tcg_gen_addi_i32(tmp32, tmp32, offset);
}
tcg_temp_free_i32(tmp32);
} else {
- SRC_EA(tmp32, opsize, 1, NULL);
+ SRC_EA(env, tmp32, opsize, 1, NULL);
src = tcg_temp_new_i64();
switch (opsize) {
case OS_LONG:
undef:
/* FIXME: Is this right for offset addressing modes? */
s->pc -= 2;
- disas_undef_fpu(s, insn);
+ disas_undef_fpu(env, s, insn);
}
DISAS_INSN(fbcc)
int l1;
addr = s->pc;
- offset = ldsw_code(s->pc);
+ offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
if (insn & (1 << 6)) {
- offset = (offset << 16) | lduw_code(s->pc);
+ offset = (offset << 16) | cpu_lduw_code(env, s->pc);
s->pc += 2;
}
s->done_mac = 1;
}
- ext = lduw_code(s->pc);
+ ext = cpu_lduw_code(env, s->pc);
s->pc += 2;
acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
- disas_undef(s, insn);
+ disas_undef(env, s, insn);
return;
}
if (insn & 0x30) {
/* MAC with load. */
- tmp = gen_lea(s, insn, OS_LONG);
+ tmp = gen_lea(env, s, insn, OS_LONG);
addr = tcg_temp_new();
tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
/* Load the value now to ensure correct exception behavior.
int accnum;
accnum = (insn >> 9) & 3;
acc = MACREG(accnum);
- SRC_EA(val, OS_LONG, 0, NULL);
+ SRC_EA(env, val, OS_LONG, 0, NULL);
if (s->env->macsr & MACSR_FI) {
tcg_gen_ext_i32_i64(acc, val);
tcg_gen_shli_i64(acc, acc, 8);
DISAS_INSN(to_macsr)
{
TCGv val;
- SRC_EA(val, OS_LONG, 0, NULL);
+ SRC_EA(env, val, OS_LONG, 0, NULL);
gen_helper_set_macsr(cpu_env, val);
gen_lookup_tb(s);
}
DISAS_INSN(to_mask)
{
TCGv val;
- SRC_EA(val, OS_LONG, 0, NULL);
+ SRC_EA(env, val, OS_LONG, 0, NULL);
tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
{
TCGv val;
TCGv acc;
- SRC_EA(val, OS_LONG, 0, NULL);
+ SRC_EA(env, val, OS_LONG, 0, NULL);
acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
gen_helper_set_mac_extf(cpu_env, val, acc);
{
uint16_t insn;
- insn = lduw_code(s->pc);
+ insn = cpu_lduw_code(env, s->pc);
s->pc += 2;
- opcode_table[insn](s, insn);
+ opcode_table[insn](env, s, insn);
}
/* generate intermediate code for basic block 'tb'. */
gen_flush_cc_op(dc);
tcg_gen_movi_i32(QREG_PC, dc->pc);
}
- gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG));
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
} else {
switch(dc->is_jmp) {
case DISAS_NEXT:
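The m68k hunks above all follow one pattern: translation-time code fetches and the effective-address macros now take the CPU state explicitly instead of reaching for a global. A minimal before/after sketch, using only identifiers that already appear in the hunks:

    ext = cpu_lduw_code(env, s->pc);           /* was: ext = lduw_code(s->pc); */
    SRC_EA(env, src, OS_WORD, sign, NULL);     /* was: SRC_EA(src, OS_WORD, sign, NULL); */
    DEST_EA(env, insn, OS_LONG, dest, &addr);  /* was: DEST_EA(insn, OS_LONG, dest, &addr); */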
obj-y += translate.o op_helper.o helper.o cpu.o
obj-$(CONFIG_SOFTMMU) += mmu.o machine.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
#include "def-helper.h"
-DEF_HELPER_1(raise_exception, void, i32)
-DEF_HELPER_0(debug, void)
+DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_1(debug, void, env)
DEF_HELPER_FLAGS_3(carry, TCG_CALL_PURE | TCG_CALL_CONST, i32, i32, i32, i32)
DEF_HELPER_2(cmp, i32, i32, i32)
DEF_HELPER_2(cmpu, i32, i32, i32)
DEF_HELPER_FLAGS_1(clz, TCG_CALL_PURE | TCG_CALL_CONST, i32, i32)
-DEF_HELPER_2(divs, i32, i32, i32)
-DEF_HELPER_2(divu, i32, i32, i32)
-
-DEF_HELPER_2(fadd, i32, i32, i32)
-DEF_HELPER_2(frsub, i32, i32, i32)
-DEF_HELPER_2(fmul, i32, i32, i32)
-DEF_HELPER_2(fdiv, i32, i32, i32)
-DEF_HELPER_1(flt, i32, i32)
-DEF_HELPER_1(fint, i32, i32)
-DEF_HELPER_1(fsqrt, i32, i32)
-
-DEF_HELPER_2(fcmp_un, i32, i32, i32)
-DEF_HELPER_2(fcmp_lt, i32, i32, i32)
-DEF_HELPER_2(fcmp_eq, i32, i32, i32)
-DEF_HELPER_2(fcmp_le, i32, i32, i32)
-DEF_HELPER_2(fcmp_gt, i32, i32, i32)
-DEF_HELPER_2(fcmp_ne, i32, i32, i32)
-DEF_HELPER_2(fcmp_ge, i32, i32, i32)
+DEF_HELPER_3(divs, i32, env, i32, i32)
+DEF_HELPER_3(divu, i32, env, i32, i32)
+
+DEF_HELPER_3(fadd, i32, env, i32, i32)
+DEF_HELPER_3(frsub, i32, env, i32, i32)
+DEF_HELPER_3(fmul, i32, env, i32, i32)
+DEF_HELPER_3(fdiv, i32, env, i32, i32)
+DEF_HELPER_2(flt, i32, env, i32)
+DEF_HELPER_2(fint, i32, env, i32)
+DEF_HELPER_2(fsqrt, i32, env, i32)
+
+DEF_HELPER_3(fcmp_un, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_lt, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_eq, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_le, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_gt, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_ne, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_ge, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_PURE | TCG_CALL_CONST, i32, i32, i32)
#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER_1(mmu_read, i32, i32)
-DEF_HELPER_2(mmu_write, void, i32, i32)
+DEF_HELPER_2(mmu_read, i32, env, i32)
+DEF_HELPER_3(mmu_write, void, env, i32, i32)
#endif
-DEF_HELPER_4(memalign, void, i32, i32, i32, i32)
-DEF_HELPER_1(stackprot, void, i32)
+DEF_HELPER_5(memalign, void, env, i32, i32, i32, i32)
+DEF_HELPER_2(stackprot, void, env, i32)
DEF_HELPER_2(get, i32, i32, i32)
DEF_HELPER_3(put, void, i32, i32, i32)
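In the helper.h changes the suffix of DEF_HELPER_<n> counts every argument, so threading env through a helper bumps <n> by one. As a rough sketch (the real expansion goes through def-helper.h; the prototype shown matches the op_helper.c hunk further down):

    DEF_HELPER_3(divs, i32, env, i32, i32)
    /* corresponds, roughly, to the prototype */
    uint32_t helper_divs(CPUMBState *env, uint32_t a, uint32_t b);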
#include <assert.h>
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helper.h"
#include "host-utils.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUMBState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUMBState *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
TranslationBlock *tb;
- CPUMBState *saved_env;
int ret;
- saved_env = env;
- env = env1;
-
ret = cpu_mb_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
}
cpu_loop_exit(env);
}
- env = saved_env;
}
#endif
return 0xdead0000 | id;
}
-void helper_raise_exception(uint32_t index)
+void helper_raise_exception(CPUMBState *env, uint32_t index)
{
env->exception_index = index;
cpu_loop_exit(env);
}
-void helper_debug(void)
+void helper_debug(CPUMBState *env)
{
int i;
return ncf;
}
-static inline int div_prepare(uint32_t a, uint32_t b)
+static inline int div_prepare(CPUMBState *env, uint32_t a, uint32_t b)
{
if (b == 0) {
env->sregs[SR_MSR] |= MSR_DZ;
if ((env->sregs[SR_MSR] & MSR_EE)
&& !(env->pvr.regs[2] & PVR2_DIV_ZERO_EXC_MASK)) {
env->sregs[SR_ESR] = ESR_EC_DIVZERO;
- helper_raise_exception(EXCP_HW_EXCP);
+ helper_raise_exception(env, EXCP_HW_EXCP);
}
return 0;
}
return 1;
}
-uint32_t helper_divs(uint32_t a, uint32_t b)
+uint32_t helper_divs(CPUMBState *env, uint32_t a, uint32_t b)
{
- if (!div_prepare(a, b))
+ if (!div_prepare(env, a, b)) {
return 0;
+ }
return (int32_t)a / (int32_t)b;
}
-uint32_t helper_divu(uint32_t a, uint32_t b)
+uint32_t helper_divu(CPUMBState *env, uint32_t a, uint32_t b)
{
- if (!div_prepare(a, b))
+ if (!div_prepare(env, a, b)) {
return 0;
+ }
return a / b;
}
/* raise FPU exception. */
-static void raise_fpu_exception(void)
+static void raise_fpu_exception(CPUMBState *env)
{
env->sregs[SR_ESR] = ESR_EC_FPU;
- helper_raise_exception(EXCP_HW_EXCP);
+ helper_raise_exception(env, EXCP_HW_EXCP);
}
-static void update_fpu_flags(int flags)
+static void update_fpu_flags(CPUMBState *env, int flags)
{
int raise = 0;
if (raise
&& (env->pvr.regs[2] & PVR2_FPU_EXC_MASK)
&& (env->sregs[SR_MSR] & MSR_EE)) {
- raise_fpu_exception();
+ raise_fpu_exception(env);
}
}
-uint32_t helper_fadd(uint32_t a, uint32_t b)
+uint32_t helper_fadd(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fd, fa, fb;
int flags;
fd.f = float32_add(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags);
+ update_fpu_flags(env, flags);
return fd.l;
}
-uint32_t helper_frsub(uint32_t a, uint32_t b)
+uint32_t helper_frsub(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fd, fa, fb;
int flags;
fb.l = b;
fd.f = float32_sub(fb.f, fa.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags);
+ update_fpu_flags(env, flags);
return fd.l;
}
-uint32_t helper_fmul(uint32_t a, uint32_t b)
+uint32_t helper_fmul(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fd, fa, fb;
int flags;
fb.l = b;
fd.f = float32_mul(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags);
+ update_fpu_flags(env, flags);
return fd.l;
}
-uint32_t helper_fdiv(uint32_t a, uint32_t b)
+uint32_t helper_fdiv(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fd, fa, fb;
int flags;
fb.l = b;
fd.f = float32_div(fb.f, fa.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags);
+ update_fpu_flags(env, flags);
return fd.l;
}
-uint32_t helper_fcmp_un(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_un(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
uint32_t r = 0;
fb.l = b;
if (float32_is_signaling_nan(fa.f) || float32_is_signaling_nan(fb.f)) {
- update_fpu_flags(float_flag_invalid);
+ update_fpu_flags(env, float_flag_invalid);
r = 1;
}
return r;
}
-uint32_t helper_fcmp_lt(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_lt(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
int r;
fb.l = b;
r = float32_lt(fb.f, fa.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags & float_flag_invalid);
+ update_fpu_flags(env, flags & float_flag_invalid);
return r;
}
-uint32_t helper_fcmp_eq(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_eq(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
int flags;
fb.l = b;
r = float32_eq_quiet(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags & float_flag_invalid);
+ update_fpu_flags(env, flags & float_flag_invalid);
return r;
}
-uint32_t helper_fcmp_le(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_le(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
int flags;
set_float_exception_flags(0, &env->fp_status);
r = float32_le(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags & float_flag_invalid);
+ update_fpu_flags(env, flags & float_flag_invalid);
return r;
}
-uint32_t helper_fcmp_gt(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_gt(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
int flags, r;
set_float_exception_flags(0, &env->fp_status);
r = float32_lt(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags & float_flag_invalid);
+ update_fpu_flags(env, flags & float_flag_invalid);
return r;
}
-uint32_t helper_fcmp_ne(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_ne(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
int flags, r;
set_float_exception_flags(0, &env->fp_status);
r = !float32_eq_quiet(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags & float_flag_invalid);
+ update_fpu_flags(env, flags & float_flag_invalid);
return r;
}
-uint32_t helper_fcmp_ge(uint32_t a, uint32_t b)
+uint32_t helper_fcmp_ge(CPUMBState *env, uint32_t a, uint32_t b)
{
CPU_FloatU fa, fb;
int flags, r;
set_float_exception_flags(0, &env->fp_status);
r = !float32_lt(fa.f, fb.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags & float_flag_invalid);
+ update_fpu_flags(env, flags & float_flag_invalid);
return r;
}
-uint32_t helper_flt(uint32_t a)
+uint32_t helper_flt(CPUMBState *env, uint32_t a)
{
CPU_FloatU fd, fa;
return fd.l;
}
-uint32_t helper_fint(uint32_t a)
+uint32_t helper_fint(CPUMBState *env, uint32_t a)
{
CPU_FloatU fa;
uint32_t r;
fa.l = a;
r = float32_to_int32(fa.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags);
+ update_fpu_flags(env, flags);
return r;
}
-uint32_t helper_fsqrt(uint32_t a)
+uint32_t helper_fsqrt(CPUMBState *env, uint32_t a)
{
CPU_FloatU fd, fa;
int flags;
fa.l = a;
fd.l = float32_sqrt(fa.f, &env->fp_status);
flags = get_float_exception_flags(&env->fp_status);
- update_fpu_flags(flags);
+ update_fpu_flags(env, flags);
return fd.l;
}
return 0;
}
-void helper_memalign(uint32_t addr, uint32_t dr, uint32_t wr, uint32_t mask)
+void helper_memalign(CPUMBState *env, uint32_t addr, uint32_t dr, uint32_t wr,
+ uint32_t mask)
{
if (addr & mask) {
qemu_log_mask(CPU_LOG_INT,
if (!(env->sregs[SR_MSR] & MSR_EE)) {
return;
}
- helper_raise_exception(EXCP_HW_EXCP);
+ helper_raise_exception(env, EXCP_HW_EXCP);
}
}
-void helper_stackprot(uint32_t addr)
+void helper_stackprot(CPUMBState *env, uint32_t addr)
{
if (addr < env->slr || addr > env->shr) {
qemu_log("Stack protector violation at %x %x %x\n",
addr, env->slr, env->shr);
env->sregs[SR_EAR] = addr;
env->sregs[SR_ESR] = ESR_EC_STACKPROT;
- helper_raise_exception(EXCP_HW_EXCP);
+ helper_raise_exception(env, EXCP_HW_EXCP);
}
}
#if !defined(CONFIG_USER_ONLY)
/* Writes/reads to the MMU's special regs end up here. */
-uint32_t helper_mmu_read(uint32_t rn)
+uint32_t helper_mmu_read(CPUMBState *env, uint32_t rn)
{
return mmu_read(env, rn);
}
-void helper_mmu_write(uint32_t rn, uint32_t v)
+void helper_mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
{
mmu_write(env, rn, v);
}
-void cpu_unassigned_access(CPUMBState *env1, target_phys_addr_t addr,
+void cpu_unassigned_access(CPUMBState *env, target_phys_addr_t addr,
int is_write, int is_exec, int is_asi, int size)
{
- CPUMBState *saved_env;
-
- saved_env = env;
- env = env1;
-
qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
addr, is_write, is_exec);
if (!(env->sregs[SR_MSR] & MSR_EE)) {
- env = saved_env;
return;
}
if (is_exec) {
if ((env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) {
env->sregs[SR_ESR] = ESR_EC_INSN_BUS;
- helper_raise_exception(EXCP_HW_EXCP);
+ helper_raise_exception(env, EXCP_HW_EXCP);
}
} else {
if ((env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) {
env->sregs[SR_ESR] = ESR_EC_DATA_BUS;
- helper_raise_exception(EXCP_HW_EXCP);
+ helper_raise_exception(env, EXCP_HW_EXCP);
}
}
- env = saved_env;
}
#endif
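With the state passed as a parameter, op_helper.c can drop dyngen-exec.h and the saved_env juggling in tlb_fill() and cpu_unassigned_access(), as the hunks above show. A rough sketch of the convention being replaced; the global-register declaration is only an approximation of what dyngen-exec.h used to provide and is not part of this patch:

    /* old style (approximate): env lived in a fixed host register */
    register CPUMBState *env asm(AREG0);
    uint32_t helper_divu(uint32_t a, uint32_t b);   /* implicitly used the global env */

    /* new style: the state is an explicit argument */
    uint32_t helper_divu(CPUMBState *env, uint32_t a, uint32_t b);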
t_sync_flags(dc);
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
- gen_helper_raise_exception(tmp);
+ gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
dc->is_jmp = DISAS_UPDATE;
}
sr &= 7;
LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
if (to)
- gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]);
+ gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
else
- gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr));
+ gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
return;
}
#endif
}
if (u)
- gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
+ gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
+ cpu_R[dc->ra]);
else
- gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
+ gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
+ cpu_R[dc->ra]);
if (!dc->rd)
tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
if (stackprot) {
- gen_helper_stackprot(*t);
+ gen_helper_stackprot(cpu_env, *t);
}
return t;
}
}
if (stackprot) {
- gen_helper_stackprot(*t);
+ gen_helper_stackprot(cpu_env, *t);
}
return t;
}
gen_load(dc, v, *addr, size);
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
- gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
+ gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
tcg_const_tl(0), tcg_const_tl(size - 1));
if (dc->rd) {
if (rev) {
* the alignment checks in between the probe and the mem
* access.
*/
- gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
+ gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
tcg_const_tl(1), tcg_const_tl(size - 1));
}
switch (fpu_insn) {
case 0:
- gen_helper_fadd(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
break;
case 1:
- gen_helper_frsub(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
break;
case 2:
- gen_helper_fmul(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
break;
case 3:
- gen_helper_fdiv(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
break;
case 4:
switch ((dc->ir >> 4) & 7) {
case 0:
- gen_helper_fcmp_un(cpu_R[dc->rd],
+ gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 1:
- gen_helper_fcmp_lt(cpu_R[dc->rd],
+ gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 2:
- gen_helper_fcmp_eq(cpu_R[dc->rd],
+ gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 3:
- gen_helper_fcmp_le(cpu_R[dc->rd],
+ gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 4:
- gen_helper_fcmp_gt(cpu_R[dc->rd],
+ gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 5:
- gen_helper_fcmp_ne(cpu_R[dc->rd],
+ gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 6:
- gen_helper_fcmp_ge(cpu_R[dc->rd],
+ gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
cpu_R[dc->ra], cpu_R[dc->rb]);
break;
default:
if (!dec_check_fpuv2(dc)) {
return;
}
- gen_helper_flt(cpu_R[dc->rd], cpu_R[dc->ra]);
+ gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
break;
case 6:
if (!dec_check_fpuv2(dc)) {
return;
}
- gen_helper_fint(cpu_R[dc->rd], cpu_R[dc->ra]);
+ gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
break;
case 7:
if (!dec_check_fpuv2(dc)) {
return;
}
- gen_helper_fsqrt(cpu_R[dc->rd], cpu_R[dc->ra]);
+ gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
break;
default:
{{0, 0}, dec_null}
};
-static inline void decode(DisasContext *dc)
+static inline void decode(DisasContext *dc, uint32_t ir)
{
- uint32_t ir;
int i;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
tcg_gen_debug_insn_start(dc->pc);
- dc->ir = ir = ldl_code(dc->pc);
+ dc->ir = ir;
LOG_DIS("%8.8x\t", dc->ir);
if (dc->ir)
gen_io_start();
dc->clear_imm = 1;
- decode(dc);
+ decode(dc, cpu_ldl_code(env, dc->pc));
if (dc->clear_imm)
dc->tb_flags &= ~IMM_FLAG;
dc->pc += 4;
if (dc->is_jmp != DISAS_JUMP) {
tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
}
- gen_helper_raise_exception(tmp);
+ gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
} else {
switch(dc->is_jmp) {
obj-y += translate.o op_helper.o helper.o cpu.o
obj-$(CONFIG_SOFTMMU) += machine.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
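On the translator side the generated gen_helper_*() wrappers mirror the DEF_HELPER declarations: the destination TCGv (if any) comes first and cpu_env is passed where env sits in the declaration. Both examples below are taken verbatim from the microblaze hunks above:

    /* DEF_HELPER_3(divu, i32, env, i32, i32) is invoked at translation time as */
    gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

    /* a void helper, DEF_HELPER_2(stackprot, void, env, i32), takes cpu_env first */
    gen_helper_stackprot(cpu_env, *t);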
uint32_t nb_tlb;
uint32_t tlb_in_use;
int (*map_address) (struct CPUMIPSState *env, target_phys_addr_t *physical, int *prot, target_ulong address, int rw, int access_type);
- void (*helper_tlbwi) (void);
- void (*helper_tlbwr) (void);
- void (*helper_tlbp) (void);
- void (*helper_tlbr) (void);
+ void (*helper_tlbwi)(struct CPUMIPSState *env);
+ void (*helper_tlbwr)(struct CPUMIPSState *env);
+ void (*helper_tlbp)(struct CPUMIPSState *env);
+ void (*helper_tlbr)(struct CPUMIPSState *env);
union {
struct {
r4k_tlb_t tlb[MIPS_TLB_MAX];
target_ulong address, int rw, int access_type);
int r4k_map_address (CPUMIPSState *env, target_phys_addr_t *physical, int *prot,
target_ulong address, int rw, int access_type);
-void r4k_helper_tlbwi (void);
-void r4k_helper_tlbwr (void);
-void r4k_helper_tlbp (void);
-void r4k_helper_tlbr (void);
+void r4k_helper_tlbwi(CPUMIPSState *env);
+void r4k_helper_tlbwr(CPUMIPSState *env);
+void r4k_helper_tlbp(CPUMIPSState *env);
+void r4k_helper_tlbr(CPUMIPSState *env);
void cpu_unassigned_access(CPUMIPSState *env, target_phys_addr_t addr,
int is_write, int is_exec, int unused, int size);
#include "def-helper.h"
-DEF_HELPER_2(raise_exception_err, noreturn, i32, int)
-DEF_HELPER_1(raise_exception, noreturn, i32)
+DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int)
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
#ifdef TARGET_MIPS64
-DEF_HELPER_3(ldl, tl, tl, tl, int)
-DEF_HELPER_3(ldr, tl, tl, tl, int)
-DEF_HELPER_3(sdl, void, tl, tl, int)
-DEF_HELPER_3(sdr, void, tl, tl, int)
+DEF_HELPER_4(ldl, tl, env, tl, tl, int)
+DEF_HELPER_4(ldr, tl, env, tl, tl, int)
+DEF_HELPER_4(sdl, void, env, tl, tl, int)
+DEF_HELPER_4(sdr, void, env, tl, tl, int)
#endif
-DEF_HELPER_3(lwl, tl, tl, tl, int)
-DEF_HELPER_3(lwr, tl, tl, tl, int)
-DEF_HELPER_3(swl, void, tl, tl, int)
-DEF_HELPER_3(swr, void, tl, tl, int)
+DEF_HELPER_4(lwl, tl, env, tl, tl, int)
+DEF_HELPER_4(lwr, tl, env, tl, tl, int)
+DEF_HELPER_4(swl, void, env, tl, tl, int)
+DEF_HELPER_4(swr, void, env, tl, tl, int)
#ifndef CONFIG_USER_ONLY
-DEF_HELPER_2(ll, tl, tl, int)
-DEF_HELPER_3(sc, tl, tl, tl, int)
+DEF_HELPER_3(ll, tl, env, tl, int)
+DEF_HELPER_4(sc, tl, env, tl, tl, int)
#ifdef TARGET_MIPS64
-DEF_HELPER_2(lld, tl, tl, int)
-DEF_HELPER_3(scd, tl, tl, tl, int)
+DEF_HELPER_3(lld, tl, env, tl, int)
+DEF_HELPER_4(scd, tl, env, tl, tl, int)
#endif
#endif
#ifdef TARGET_MIPS64
DEF_HELPER_FLAGS_1(dclo, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
DEF_HELPER_FLAGS_1(dclz, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl)
-DEF_HELPER_2(dmult, void, tl, tl)
-DEF_HELPER_2(dmultu, void, tl, tl)
+DEF_HELPER_3(dmult, void, env, tl, tl)
+DEF_HELPER_3(dmultu, void, env, tl, tl)
#endif
-DEF_HELPER_2(muls, tl, tl, tl)
-DEF_HELPER_2(mulsu, tl, tl, tl)
-DEF_HELPER_2(macc, tl, tl, tl)
-DEF_HELPER_2(maccu, tl, tl, tl)
-DEF_HELPER_2(msac, tl, tl, tl)
-DEF_HELPER_2(msacu, tl, tl, tl)
-DEF_HELPER_2(mulhi, tl, tl, tl)
-DEF_HELPER_2(mulhiu, tl, tl, tl)
-DEF_HELPER_2(mulshi, tl, tl, tl)
-DEF_HELPER_2(mulshiu, tl, tl, tl)
-DEF_HELPER_2(macchi, tl, tl, tl)
-DEF_HELPER_2(macchiu, tl, tl, tl)
-DEF_HELPER_2(msachi, tl, tl, tl)
-DEF_HELPER_2(msachiu, tl, tl, tl)
+DEF_HELPER_3(muls, tl, env, tl, tl)
+DEF_HELPER_3(mulsu, tl, env, tl, tl)
+DEF_HELPER_3(macc, tl, env, tl, tl)
+DEF_HELPER_3(maccu, tl, env, tl, tl)
+DEF_HELPER_3(msac, tl, env, tl, tl)
+DEF_HELPER_3(msacu, tl, env, tl, tl)
+DEF_HELPER_3(mulhi, tl, env, tl, tl)
+DEF_HELPER_3(mulhiu, tl, env, tl, tl)
+DEF_HELPER_3(mulshi, tl, env, tl, tl)
+DEF_HELPER_3(mulshiu, tl, env, tl, tl)
+DEF_HELPER_3(macchi, tl, env, tl, tl)
+DEF_HELPER_3(macchiu, tl, env, tl, tl)
+DEF_HELPER_3(msachi, tl, env, tl, tl)
+DEF_HELPER_3(msachiu, tl, env, tl, tl)
#ifndef CONFIG_USER_ONLY
/* CP0 helpers */
-DEF_HELPER_0(mfc0_mvpcontrol, tl)
-DEF_HELPER_0(mfc0_mvpconf0, tl)
-DEF_HELPER_0(mfc0_mvpconf1, tl)
-DEF_HELPER_0(mftc0_vpecontrol, tl)
-DEF_HELPER_0(mftc0_vpeconf0, tl)
-DEF_HELPER_0(mfc0_random, tl)
-DEF_HELPER_0(mfc0_tcstatus, tl)
-DEF_HELPER_0(mftc0_tcstatus, tl)
-DEF_HELPER_0(mfc0_tcbind, tl)
-DEF_HELPER_0(mftc0_tcbind, tl)
-DEF_HELPER_0(mfc0_tcrestart, tl)
-DEF_HELPER_0(mftc0_tcrestart, tl)
-DEF_HELPER_0(mfc0_tchalt, tl)
-DEF_HELPER_0(mftc0_tchalt, tl)
-DEF_HELPER_0(mfc0_tccontext, tl)
-DEF_HELPER_0(mftc0_tccontext, tl)
-DEF_HELPER_0(mfc0_tcschedule, tl)
-DEF_HELPER_0(mftc0_tcschedule, tl)
-DEF_HELPER_0(mfc0_tcschefback, tl)
-DEF_HELPER_0(mftc0_tcschefback, tl)
-DEF_HELPER_0(mfc0_count, tl)
-DEF_HELPER_0(mftc0_entryhi, tl)
-DEF_HELPER_0(mftc0_status, tl)
-DEF_HELPER_0(mftc0_cause, tl)
-DEF_HELPER_0(mftc0_epc, tl)
-DEF_HELPER_0(mftc0_ebase, tl)
-DEF_HELPER_1(mftc0_configx, tl, tl)
-DEF_HELPER_0(mfc0_lladdr, tl)
-DEF_HELPER_1(mfc0_watchlo, tl, i32)
-DEF_HELPER_1(mfc0_watchhi, tl, i32)
-DEF_HELPER_0(mfc0_debug, tl)
-DEF_HELPER_0(mftc0_debug, tl)
+DEF_HELPER_1(mfc0_mvpcontrol, tl, env)
+DEF_HELPER_1(mfc0_mvpconf0, tl, env)
+DEF_HELPER_1(mfc0_mvpconf1, tl, env)
+DEF_HELPER_1(mftc0_vpecontrol, tl, env)
+DEF_HELPER_1(mftc0_vpeconf0, tl, env)
+DEF_HELPER_1(mfc0_random, tl, env)
+DEF_HELPER_1(mfc0_tcstatus, tl, env)
+DEF_HELPER_1(mftc0_tcstatus, tl, env)
+DEF_HELPER_1(mfc0_tcbind, tl, env)
+DEF_HELPER_1(mftc0_tcbind, tl, env)
+DEF_HELPER_1(mfc0_tcrestart, tl, env)
+DEF_HELPER_1(mftc0_tcrestart, tl, env)
+DEF_HELPER_1(mfc0_tchalt, tl, env)
+DEF_HELPER_1(mftc0_tchalt, tl, env)
+DEF_HELPER_1(mfc0_tccontext, tl, env)
+DEF_HELPER_1(mftc0_tccontext, tl, env)
+DEF_HELPER_1(mfc0_tcschedule, tl, env)
+DEF_HELPER_1(mftc0_tcschedule, tl, env)
+DEF_HELPER_1(mfc0_tcschefback, tl, env)
+DEF_HELPER_1(mftc0_tcschefback, tl, env)
+DEF_HELPER_1(mfc0_count, tl, env)
+DEF_HELPER_1(mftc0_entryhi, tl, env)
+DEF_HELPER_1(mftc0_status, tl, env)
+DEF_HELPER_1(mftc0_cause, tl, env)
+DEF_HELPER_1(mftc0_epc, tl, env)
+DEF_HELPER_1(mftc0_ebase, tl, env)
+DEF_HELPER_2(mftc0_configx, tl, env, tl)
+DEF_HELPER_1(mfc0_lladdr, tl, env)
+DEF_HELPER_2(mfc0_watchlo, tl, env, i32)
+DEF_HELPER_2(mfc0_watchhi, tl, env, i32)
+DEF_HELPER_1(mfc0_debug, tl, env)
+DEF_HELPER_1(mftc0_debug, tl, env)
#ifdef TARGET_MIPS64
-DEF_HELPER_0(dmfc0_tcrestart, tl)
-DEF_HELPER_0(dmfc0_tchalt, tl)
-DEF_HELPER_0(dmfc0_tccontext, tl)
-DEF_HELPER_0(dmfc0_tcschedule, tl)
-DEF_HELPER_0(dmfc0_tcschefback, tl)
-DEF_HELPER_0(dmfc0_lladdr, tl)
-DEF_HELPER_1(dmfc0_watchlo, tl, i32)
+DEF_HELPER_1(dmfc0_tcrestart, tl, env)
+DEF_HELPER_1(dmfc0_tchalt, tl, env)
+DEF_HELPER_1(dmfc0_tccontext, tl, env)
+DEF_HELPER_1(dmfc0_tcschedule, tl, env)
+DEF_HELPER_1(dmfc0_tcschefback, tl, env)
+DEF_HELPER_1(dmfc0_lladdr, tl, env)
+DEF_HELPER_2(dmfc0_watchlo, tl, env, i32)
#endif /* TARGET_MIPS64 */
-DEF_HELPER_1(mtc0_index, void, tl)
-DEF_HELPER_1(mtc0_mvpcontrol, void, tl)
-DEF_HELPER_1(mtc0_vpecontrol, void, tl)
-DEF_HELPER_1(mttc0_vpecontrol, void, tl)
-DEF_HELPER_1(mtc0_vpeconf0, void, tl)
-DEF_HELPER_1(mttc0_vpeconf0, void, tl)
-DEF_HELPER_1(mtc0_vpeconf1, void, tl)
-DEF_HELPER_1(mtc0_yqmask, void, tl)
-DEF_HELPER_1(mtc0_vpeopt, void, tl)
-DEF_HELPER_1(mtc0_entrylo0, void, tl)
-DEF_HELPER_1(mtc0_tcstatus, void, tl)
-DEF_HELPER_1(mttc0_tcstatus, void, tl)
-DEF_HELPER_1(mtc0_tcbind, void, tl)
-DEF_HELPER_1(mttc0_tcbind, void, tl)
-DEF_HELPER_1(mtc0_tcrestart, void, tl)
-DEF_HELPER_1(mttc0_tcrestart, void, tl)
-DEF_HELPER_1(mtc0_tchalt, void, tl)
-DEF_HELPER_1(mttc0_tchalt, void, tl)
-DEF_HELPER_1(mtc0_tccontext, void, tl)
-DEF_HELPER_1(mttc0_tccontext, void, tl)
-DEF_HELPER_1(mtc0_tcschedule, void, tl)
-DEF_HELPER_1(mttc0_tcschedule, void, tl)
-DEF_HELPER_1(mtc0_tcschefback, void, tl)
-DEF_HELPER_1(mttc0_tcschefback, void, tl)
-DEF_HELPER_1(mtc0_entrylo1, void, tl)
-DEF_HELPER_1(mtc0_context, void, tl)
-DEF_HELPER_1(mtc0_pagemask, void, tl)
-DEF_HELPER_1(mtc0_pagegrain, void, tl)
-DEF_HELPER_1(mtc0_wired, void, tl)
-DEF_HELPER_1(mtc0_srsconf0, void, tl)
-DEF_HELPER_1(mtc0_srsconf1, void, tl)
-DEF_HELPER_1(mtc0_srsconf2, void, tl)
-DEF_HELPER_1(mtc0_srsconf3, void, tl)
-DEF_HELPER_1(mtc0_srsconf4, void, tl)
-DEF_HELPER_1(mtc0_hwrena, void, tl)
-DEF_HELPER_1(mtc0_count, void, tl)
-DEF_HELPER_1(mtc0_entryhi, void, tl)
-DEF_HELPER_1(mttc0_entryhi, void, tl)
-DEF_HELPER_1(mtc0_compare, void, tl)
-DEF_HELPER_1(mtc0_status, void, tl)
-DEF_HELPER_1(mttc0_status, void, tl)
-DEF_HELPER_1(mtc0_intctl, void, tl)
-DEF_HELPER_1(mtc0_srsctl, void, tl)
-DEF_HELPER_1(mtc0_cause, void, tl)
-DEF_HELPER_1(mttc0_cause, void, tl)
-DEF_HELPER_1(mtc0_ebase, void, tl)
-DEF_HELPER_1(mttc0_ebase, void, tl)
-DEF_HELPER_1(mtc0_config0, void, tl)
-DEF_HELPER_1(mtc0_config2, void, tl)
-DEF_HELPER_1(mtc0_lladdr, void, tl)
-DEF_HELPER_2(mtc0_watchlo, void, tl, i32)
-DEF_HELPER_2(mtc0_watchhi, void, tl, i32)
-DEF_HELPER_1(mtc0_xcontext, void, tl)
-DEF_HELPER_1(mtc0_framemask, void, tl)
-DEF_HELPER_1(mtc0_debug, void, tl)
-DEF_HELPER_1(mttc0_debug, void, tl)
-DEF_HELPER_1(mtc0_performance0, void, tl)
-DEF_HELPER_1(mtc0_taglo, void, tl)
-DEF_HELPER_1(mtc0_datalo, void, tl)
-DEF_HELPER_1(mtc0_taghi, void, tl)
-DEF_HELPER_1(mtc0_datahi, void, tl)
+DEF_HELPER_2(mtc0_index, void, env, tl)
+DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mttc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mttc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf1, void, env, tl)
+DEF_HELPER_2(mtc0_yqmask, void, env, tl)
+DEF_HELPER_2(mtc0_vpeopt, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo0, void, env, tl)
+DEF_HELPER_2(mtc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mttc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mtc0_tcbind, void, env, tl)
+DEF_HELPER_2(mttc0_tcbind, void, env, tl)
+DEF_HELPER_2(mtc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mttc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mtc0_tchalt, void, env, tl)
+DEF_HELPER_2(mttc0_tchalt, void, env, tl)
+DEF_HELPER_2(mtc0_tccontext, void, env, tl)
+DEF_HELPER_2(mttc0_tccontext, void, env, tl)
+DEF_HELPER_2(mtc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mttc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mtc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mttc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo1, void, env, tl)
+DEF_HELPER_2(mtc0_context, void, env, tl)
+DEF_HELPER_2(mtc0_pagemask, void, env, tl)
+DEF_HELPER_2(mtc0_pagegrain, void, env, tl)
+DEF_HELPER_2(mtc0_wired, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf0, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf1, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf2, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf3, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf4, void, env, tl)
+DEF_HELPER_2(mtc0_hwrena, void, env, tl)
+DEF_HELPER_2(mtc0_count, void, env, tl)
+DEF_HELPER_2(mtc0_entryhi, void, env, tl)
+DEF_HELPER_2(mttc0_entryhi, void, env, tl)
+DEF_HELPER_2(mtc0_compare, void, env, tl)
+DEF_HELPER_2(mtc0_status, void, env, tl)
+DEF_HELPER_2(mttc0_status, void, env, tl)
+DEF_HELPER_2(mtc0_intctl, void, env, tl)
+DEF_HELPER_2(mtc0_srsctl, void, env, tl)
+DEF_HELPER_2(mtc0_cause, void, env, tl)
+DEF_HELPER_2(mttc0_cause, void, env, tl)
+DEF_HELPER_2(mtc0_ebase, void, env, tl)
+DEF_HELPER_2(mttc0_ebase, void, env, tl)
+DEF_HELPER_2(mtc0_config0, void, env, tl)
+DEF_HELPER_2(mtc0_config2, void, env, tl)
+DEF_HELPER_2(mtc0_lladdr, void, env, tl)
+DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32)
+DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32)
+DEF_HELPER_2(mtc0_xcontext, void, env, tl)
+DEF_HELPER_2(mtc0_framemask, void, env, tl)
+DEF_HELPER_2(mtc0_debug, void, env, tl)
+DEF_HELPER_2(mttc0_debug, void, env, tl)
+DEF_HELPER_2(mtc0_performance0, void, env, tl)
+DEF_HELPER_2(mtc0_taglo, void, env, tl)
+DEF_HELPER_2(mtc0_datalo, void, env, tl)
+DEF_HELPER_2(mtc0_taghi, void, env, tl)
+DEF_HELPER_2(mtc0_datahi, void, env, tl)
/* MIPS MT functions */
-DEF_HELPER_1(mftgpr, tl, i32);
-DEF_HELPER_1(mftlo, tl, i32)
-DEF_HELPER_1(mfthi, tl, i32)
-DEF_HELPER_1(mftacx, tl, i32)
-DEF_HELPER_0(mftdsp, tl)
-DEF_HELPER_2(mttgpr, void, tl, i32)
-DEF_HELPER_2(mttlo, void, tl, i32)
-DEF_HELPER_2(mtthi, void, tl, i32)
-DEF_HELPER_2(mttacx, void, tl, i32)
-DEF_HELPER_1(mttdsp, void, tl)
+DEF_HELPER_2(mftgpr, tl, env, i32)
+DEF_HELPER_2(mftlo, tl, env, i32)
+DEF_HELPER_2(mfthi, tl, env, i32)
+DEF_HELPER_2(mftacx, tl, env, i32)
+DEF_HELPER_1(mftdsp, tl, env)
+DEF_HELPER_3(mttgpr, void, env, tl, i32)
+DEF_HELPER_3(mttlo, void, env, tl, i32)
+DEF_HELPER_3(mtthi, void, env, tl, i32)
+DEF_HELPER_3(mttacx, void, env, tl, i32)
+DEF_HELPER_2(mttdsp, void, env, tl)
DEF_HELPER_0(dmt, tl)
DEF_HELPER_0(emt, tl)
-DEF_HELPER_0(dvpe, tl)
-DEF_HELPER_0(evpe, tl)
+DEF_HELPER_1(dvpe, tl, env)
+DEF_HELPER_1(evpe, tl, env)
#endif /* !CONFIG_USER_ONLY */
/* microMIPS functions */
-DEF_HELPER_3(lwm, void, tl, tl, i32);
-DEF_HELPER_3(swm, void, tl, tl, i32);
+DEF_HELPER_4(lwm, void, env, tl, tl, i32)
+DEF_HELPER_4(swm, void, env, tl, tl, i32)
#ifdef TARGET_MIPS64
-DEF_HELPER_3(ldm, void, tl, tl, i32);
-DEF_HELPER_3(sdm, void, tl, tl, i32);
+DEF_HELPER_4(ldm, void, env, tl, tl, i32)
+DEF_HELPER_4(sdm, void, env, tl, tl, i32)
#endif
DEF_HELPER_2(fork, void, tl, tl)
-DEF_HELPER_1(yield, tl, tl)
+DEF_HELPER_2(yield, tl, env, tl)
/* CP1 functions */
-DEF_HELPER_1(cfc1, tl, i32)
-DEF_HELPER_2(ctc1, void, tl, i32)
+DEF_HELPER_2(cfc1, tl, env, i32)
+DEF_HELPER_3(ctc1, void, env, tl, i32)
-DEF_HELPER_1(float_cvtd_s, i64, i32)
-DEF_HELPER_1(float_cvtd_w, i64, i32)
-DEF_HELPER_1(float_cvtd_l, i64, i64)
-DEF_HELPER_1(float_cvtl_d, i64, i64)
-DEF_HELPER_1(float_cvtl_s, i64, i32)
-DEF_HELPER_1(float_cvtps_pw, i64, i64)
-DEF_HELPER_1(float_cvtpw_ps, i64, i64)
-DEF_HELPER_1(float_cvts_d, i32, i64)
-DEF_HELPER_1(float_cvts_w, i32, i32)
-DEF_HELPER_1(float_cvts_l, i32, i64)
-DEF_HELPER_1(float_cvts_pl, i32, i32)
-DEF_HELPER_1(float_cvts_pu, i32, i32)
-DEF_HELPER_1(float_cvtw_s, i32, i32)
-DEF_HELPER_1(float_cvtw_d, i32, i64)
+DEF_HELPER_2(float_cvtd_s, i64, env, i32)
+DEF_HELPER_2(float_cvtd_w, i64, env, i32)
+DEF_HELPER_2(float_cvtd_l, i64, env, i64)
+DEF_HELPER_2(float_cvtl_d, i64, env, i64)
+DEF_HELPER_2(float_cvtl_s, i64, env, i32)
+DEF_HELPER_2(float_cvtps_pw, i64, env, i64)
+DEF_HELPER_2(float_cvtpw_ps, i64, env, i64)
+DEF_HELPER_2(float_cvts_d, i32, env, i64)
+DEF_HELPER_2(float_cvts_w, i32, env, i32)
+DEF_HELPER_2(float_cvts_l, i32, env, i64)
+DEF_HELPER_2(float_cvts_pl, i32, env, i32)
+DEF_HELPER_2(float_cvts_pu, i32, env, i32)
+DEF_HELPER_2(float_cvtw_s, i32, env, i32)
+DEF_HELPER_2(float_cvtw_d, i32, env, i64)
-DEF_HELPER_2(float_addr_ps, i64, i64, i64)
-DEF_HELPER_2(float_mulr_ps, i64, i64, i64)
+DEF_HELPER_3(float_addr_ps, i64, env, i64, i64)
+DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64)
-#define FOP_PROTO(op) \
-DEF_HELPER_1(float_ ## op ## l_s, i64, i32) \
-DEF_HELPER_1(float_ ## op ## l_d, i64, i64) \
-DEF_HELPER_1(float_ ## op ## w_s, i32, i32) \
-DEF_HELPER_1(float_ ## op ## w_d, i32, i64)
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## l_s, i64, env, i32) \
+DEF_HELPER_2(float_ ## op ## l_d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## w_s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## w_d, i32, env, i64)
FOP_PROTO(round)
FOP_PROTO(trunc)
FOP_PROTO(ceil)
FOP_PROTO(floor)
#undef FOP_PROTO
-#define FOP_PROTO(op) \
-DEF_HELPER_1(float_ ## op ## _s, i32, i32) \
-DEF_HELPER_1(float_ ## op ## _d, i64, i64)
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64)
FOP_PROTO(sqrt)
FOP_PROTO(rsqrt)
FOP_PROTO(recip)
DEF_HELPER_1(float_ ## op ## _ps, i64, i64)
FOP_PROTO(abs)
FOP_PROTO(chs)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _ps, i64, env, i64)
FOP_PROTO(recip1)
FOP_PROTO(rsqrt1)
#undef FOP_PROTO
-#define FOP_PROTO(op) \
-DEF_HELPER_2(float_ ## op ## _s, i32, i32, i32) \
-DEF_HELPER_2(float_ ## op ## _d, i64, i64, i64) \
-DEF_HELPER_2(float_ ## op ## _ps, i64, i64, i64)
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) \
+DEF_HELPER_3(float_ ## op ## _ps, i64, env, i64, i64)
FOP_PROTO(add)
FOP_PROTO(sub)
FOP_PROTO(mul)
FOP_PROTO(rsqrt2)
#undef FOP_PROTO
-#define FOP_PROTO(op) \
-DEF_HELPER_3(float_ ## op ## _s, i32, i32, i32, i32) \
-DEF_HELPER_3(float_ ## op ## _d, i64, i64, i64, i64) \
-DEF_HELPER_3(float_ ## op ## _ps, i64, i64, i64, i64)
+#define FOP_PROTO(op) \
+DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
+DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) \
+DEF_HELPER_4(float_ ## op ## _ps, i64, env, i64, i64, i64)
FOP_PROTO(muladd)
FOP_PROTO(mulsub)
FOP_PROTO(nmuladd)
FOP_PROTO(nmulsub)
#undef FOP_PROTO
-#define FOP_PROTO(op) \
-DEF_HELPER_3(cmp_d_ ## op, void, i64, i64, int) \
-DEF_HELPER_3(cmpabs_d_ ## op, void, i64, i64, int) \
-DEF_HELPER_3(cmp_s_ ## op, void, i32, i32, int) \
-DEF_HELPER_3(cmpabs_s_ ## op, void, i32, i32, int) \
-DEF_HELPER_3(cmp_ps_ ## op, void, i64, i64, int) \
-DEF_HELPER_3(cmpabs_ps_ ## op, void, i64, i64, int)
+#define FOP_PROTO(op) \
+DEF_HELPER_4(cmp_d_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmpabs_d_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmp_s_ ## op, void, env, i32, i32, int) \
+DEF_HELPER_4(cmpabs_s_ ## op, void, env, i32, i32, int) \
+DEF_HELPER_4(cmp_ps_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmpabs_ps_ ## op, void, env, i64, i64, int)
FOP_PROTO(f)
FOP_PROTO(un)
FOP_PROTO(eq)
/* Special functions */
#ifndef CONFIG_USER_ONLY
-DEF_HELPER_0(tlbwi, void)
-DEF_HELPER_0(tlbwr, void)
-DEF_HELPER_0(tlbp, void)
-DEF_HELPER_0(tlbr, void)
-DEF_HELPER_0(di, tl)
-DEF_HELPER_0(ei, tl)
-DEF_HELPER_0(eret, void)
-DEF_HELPER_0(deret, void)
+DEF_HELPER_1(tlbwi, void, env)
+DEF_HELPER_1(tlbwr, void, env)
+DEF_HELPER_1(tlbp, void, env)
+DEF_HELPER_1(tlbr, void, env)
+DEF_HELPER_1(di, tl, env)
+DEF_HELPER_1(ei, tl, env)
+DEF_HELPER_1(eret, void, env)
+DEF_HELPER_1(deret, void, env)
#endif /* !CONFIG_USER_ONLY */
-DEF_HELPER_0(rdhwr_cpunum, tl)
-DEF_HELPER_0(rdhwr_synci_step, tl)
-DEF_HELPER_0(rdhwr_cc, tl)
-DEF_HELPER_0(rdhwr_ccres, tl)
-DEF_HELPER_1(pmon, void, int)
-DEF_HELPER_0(wait, void)
+DEF_HELPER_1(rdhwr_cpunum, tl, env)
+DEF_HELPER_1(rdhwr_synci_step, tl, env)
+DEF_HELPER_1(rdhwr_cc, tl, env)
+DEF_HELPER_1(rdhwr_ccres, tl, env)
+DEF_HELPER_2(pmon, void, env, int)
+DEF_HELPER_1(wait, void, env)
#include "def-helper.h"
*/
#include <stdlib.h>
#include "cpu.h"
-#include "dyngen-exec.h"
-
#include "host-utils.h"
#include "helper.h"
/*****************************************************************************/
/* Exceptions processing helpers */
-void helper_raise_exception_err (uint32_t exception, int error_code)
+void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+ int error_code)
{
#if 1
if (exception < 0x100)
cpu_loop_exit(env);
}
-void helper_raise_exception (uint32_t exception)
+void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
- helper_raise_exception_err(exception, 0);
+ helper_raise_exception_err(env, exception, 0);
}
#if !defined(CONFIG_USER_ONLY)
-static void do_restore_state(uintptr_t pc)
+static void do_restore_state(CPUMIPSState *env, uintptr_t pc)
{
TranslationBlock *tb;
#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type) \
-static inline type do_##name(target_ulong addr, int mem_idx) \
+static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
+ int mem_idx) \
{ \
return (type) insn##_raw(addr); \
}
#else
#define HELPER_LD(name, insn, type) \
-static inline type do_##name(target_ulong addr, int mem_idx) \
+static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
+ int mem_idx) \
{ \
switch (mem_idx) \
{ \
- case 0: return (type) insn##_kernel(addr); break; \
- case 1: return (type) insn##_super(addr); break; \
+ case 0: return (type) cpu_##insn##_kernel(env, addr); break; \
+ case 1: return (type) cpu_##insn##_super(env, addr); break; \
default: \
- case 2: return (type) insn##_user(addr); break; \
+ case 2: return (type) cpu_##insn##_user(env, addr); break; \
} \
}
#endif
#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type) \
-static inline void do_##name(target_ulong addr, type val, int mem_idx) \
+static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
+ type val, int mem_idx) \
{ \
insn##_raw(addr, val); \
}
#else
#define HELPER_ST(name, insn, type) \
-static inline void do_##name(target_ulong addr, type val, int mem_idx) \
+static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
+ type val, int mem_idx) \
{ \
switch (mem_idx) \
{ \
- case 0: insn##_kernel(addr, val); break; \
- case 1: insn##_super(addr, val); break; \
+ case 0: cpu_##insn##_kernel(env, addr, val); break; \
+ case 1: cpu_##insn##_super(env, addr, val); break; \
default: \
- case 2: insn##_user(addr, val); break; \
+ case 2: cpu_##insn##_user(env, addr, val); break; \
} \
}
#endif
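The HELPER_LD/HELPER_ST wrappers keep dispatching on mem_idx (kernel/super/user access) but now go through the env-taking cpu_* accessors. As an illustration only, an instantiation along the lines of HELPER_LD(lbu, ldub, uint8_t) (the actual instantiation lines fall outside this excerpt) would expand to roughly:

    static inline uint8_t do_lbu(CPUMIPSState *env, target_ulong addr, int mem_idx)
    {
        switch (mem_idx) {
        case 0: return (uint8_t)cpu_ldub_kernel(env, addr);
        case 1: return (uint8_t)cpu_ldub_super(env, addr);
        default:
        case 2: return (uint8_t)cpu_ldub_user(env, addr);
        }
    }

which is the do_lbu() used by the lwl/lwr/ldl/ldr helpers further down.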
#endif /* TARGET_MIPS64 */
/* 64 bits arithmetic for 32 bits hosts */
-static inline uint64_t get_HILO (void)
+static inline uint64_t get_HILO(CPUMIPSState *env)
{
return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}
-static inline target_ulong set_HIT0_LO(uint64_t HILO)
+static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
{
target_ulong tmp;
env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
return tmp;
}
-static inline target_ulong set_HI_LOT0(uint64_t HILO)
+static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
{
target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
/* Multiplication variants of the vr54xx. */
-target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
+target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HI_LOT0(0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
+ return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2));
}
-target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HI_LOT0(0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+ return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
}
-target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
+target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HI_LOT0((int64_t)get_HILO() + (int64_t)(int32_t)arg1 *
- (int64_t)(int32_t)arg2);
+ return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
}
-target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
+target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO((int64_t)get_HILO() + (int64_t)(int32_t)arg1 *
- (int64_t)(int32_t)arg2);
+ return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
}
-target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HI_LOT0((uint64_t)get_HILO() + (uint64_t)(uint32_t)arg1 *
- (uint64_t)(uint32_t)arg2);
+ return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
-target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO((uint64_t)get_HILO() + (uint64_t)(uint32_t)arg1 *
- (uint64_t)(uint32_t)arg2);
+ return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
-target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
+target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HI_LOT0((int64_t)get_HILO() - (int64_t)(int32_t)arg1 *
- (int64_t)(int32_t)arg2);
+ return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
}
-target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
+target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO((int64_t)get_HILO() - (int64_t)(int32_t)arg1 *
- (int64_t)(int32_t)arg2);
+ return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
}
-target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HI_LOT0((uint64_t)get_HILO() - (uint64_t)(uint32_t)arg1 *
- (uint64_t)(uint32_t)arg2);
+ return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
-target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO((uint64_t)get_HILO() - (uint64_t)(uint32_t)arg1 *
- (uint64_t)(uint32_t)arg2);
+ return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
-target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
+target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
+ return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
}
-target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+ return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
}
-target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
+target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO(0 - (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
+ return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
}
-target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
+target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
{
- return set_HIT0_LO(0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+ return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
}
#ifdef TARGET_MIPS64
-void helper_dmult (target_ulong arg1, target_ulong arg2)
+void helper_dmult(CPUMIPSState *env, target_ulong arg1, target_ulong arg2)
{
muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
-void helper_dmultu (target_ulong arg1, target_ulong arg2)
+void helper_dmultu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2)
{
mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#ifndef CONFIG_USER_ONLY
-static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
+static inline target_phys_addr_t do_translate_address(CPUMIPSState *env,
+ target_ulong address,
+ int rw)
{
target_phys_addr_t lladdr;
}
#define HELPER_LD_ATOMIC(name, insn) \
-target_ulong helper_##name(target_ulong arg, int mem_idx) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
{ \
- env->lladdr = do_translate_address(arg, 0); \
- env->llval = do_##insn(arg, mem_idx); \
+ env->lladdr = do_translate_address(env, arg, 0); \
+ env->llval = do_##insn(env, arg, mem_idx); \
return env->llval; \
}
HELPER_LD_ATOMIC(ll, lw)
#undef HELPER_LD_ATOMIC
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
-target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \
+ target_ulong arg2, int mem_idx) \
{ \
target_long tmp; \
\
if (arg2 & almask) { \
env->CP0_BadVAddr = arg2; \
- helper_raise_exception(EXCP_AdES); \
+ helper_raise_exception(env, EXCP_AdES); \
} \
- if (do_translate_address(arg2, 1) == env->lladdr) { \
- tmp = do_##ld_insn(arg2, mem_idx); \
+ if (do_translate_address(env, arg2, 1) == env->lladdr) { \
+ tmp = do_##ld_insn(env, arg2, mem_idx); \
if (tmp == env->llval) { \
- do_##st_insn(arg2, arg1, mem_idx); \
+ do_##st_insn(env, arg2, arg1, mem_idx); \
return 1; \
} \
} \
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
-target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
+target_ulong helper_lwl(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2, int mem_idx)
{
target_ulong tmp;
- tmp = do_lbu(arg2, mem_idx);
+ tmp = do_lbu(env, arg2, mem_idx);
arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
if (GET_LMASK(arg2) <= 2) {
- tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
}
if (GET_LMASK(arg2) <= 1) {
- tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
}
if (GET_LMASK(arg2) == 0) {
- tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
arg1 = (arg1 & 0xFFFFFF00) | tmp;
}
return (int32_t)arg1;
}
-target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
+target_ulong helper_lwr(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2, int mem_idx)
{
target_ulong tmp;
- tmp = do_lbu(arg2, mem_idx);
+ tmp = do_lbu(env, arg2, mem_idx);
arg1 = (arg1 & 0xFFFFFF00) | tmp;
if (GET_LMASK(arg2) >= 1) {
- tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
}
if (GET_LMASK(arg2) >= 2) {
- tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
}
if (GET_LMASK(arg2) == 3) {
- tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
}
return (int32_t)arg1;
}
-void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
+void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
{
- do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
+ do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx);
if (GET_LMASK(arg2) <= 2)
- do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
if (GET_LMASK(arg2) <= 1)
- do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
if (GET_LMASK(arg2) == 0)
- do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}
-void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
+void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
{
- do_sb(arg2, (uint8_t)arg1, mem_idx);
+ do_sb(env, arg2, (uint8_t)arg1, mem_idx);
if (GET_LMASK(arg2) >= 1)
- do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
if (GET_LMASK(arg2) >= 2)
- do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
if (GET_LMASK(arg2) == 3)
- do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}
#if defined(TARGET_MIPS64)
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
-target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
+target_ulong helper_ldl(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2, int mem_idx)
{
uint64_t tmp;
- tmp = do_lbu(arg2, mem_idx);
+ tmp = do_lbu(env, arg2, mem_idx);
arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
if (GET_LMASK64(arg2) <= 6) {
- tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
}
if (GET_LMASK64(arg2) <= 5) {
- tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
}
if (GET_LMASK64(arg2) <= 4) {
- tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
}
if (GET_LMASK64(arg2) <= 3) {
- tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 4), mem_idx);
arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
}
if (GET_LMASK64(arg2) <= 2) {
- tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 5), mem_idx);
arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
}
if (GET_LMASK64(arg2) <= 1) {
- tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 6), mem_idx);
arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
}
if (GET_LMASK64(arg2) == 0) {
- tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, 7), mem_idx);
arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
}
return arg1;
}
-target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
+target_ulong helper_ldr(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2, int mem_idx)
{
uint64_t tmp;
- tmp = do_lbu(arg2, mem_idx);
+ tmp = do_lbu(env, arg2, mem_idx);
arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
if (GET_LMASK64(arg2) >= 1) {
- tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
}
if (GET_LMASK64(arg2) >= 2) {
- tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
}
if (GET_LMASK64(arg2) >= 3) {
- tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
}
if (GET_LMASK64(arg2) >= 4) {
- tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -4), mem_idx);
arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
}
if (GET_LMASK64(arg2) >= 5) {
- tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -5), mem_idx);
arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
}
if (GET_LMASK64(arg2) >= 6) {
- tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -6), mem_idx);
arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
}
if (GET_LMASK64(arg2) == 7) {
- tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
+ tmp = do_lbu(env, GET_OFFSET(arg2, -7), mem_idx);
arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
}
return arg1;
}
-void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
+void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
{
- do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
+ do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx);
if (GET_LMASK64(arg2) <= 6)
- do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
if (GET_LMASK64(arg2) <= 5)
- do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
if (GET_LMASK64(arg2) <= 4)
- do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
if (GET_LMASK64(arg2) <= 3)
- do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
if (GET_LMASK64(arg2) <= 2)
- do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
if (GET_LMASK64(arg2) <= 1)
- do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
if (GET_LMASK64(arg2) <= 0)
- do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
+ do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}
-void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
+void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
{
- do_sb(arg2, (uint8_t)arg1, mem_idx);
+ do_sb(env, arg2, (uint8_t)arg1, mem_idx);
if (GET_LMASK64(arg2) >= 1)
- do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
if (GET_LMASK64(arg2) >= 2)
- do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
if (GET_LMASK64(arg2) >= 3)
- do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
if (GET_LMASK64(arg2) >= 4)
- do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
if (GET_LMASK64(arg2) >= 5)
- do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
if (GET_LMASK64(arg2) >= 6)
- do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
if (GET_LMASK64(arg2) == 7)
- do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
+ do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
#endif /* TARGET_MIPS64 */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
-void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
+void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
{
target_ulong base_reglist = reglist & 0xf;
target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
-#define ldfun ldl_raw
+#define ldfun(env, addr) ldl_raw(addr)
#else
- uint32_t (*ldfun)(target_ulong);
+ uint32_t (*ldfun)(CPUMIPSState *env, target_ulong);
switch (mem_idx)
{
- case 0: ldfun = ldl_kernel; break;
- case 1: ldfun = ldl_super; break;
+ case 0: ldfun = cpu_ldl_kernel; break;
+ case 1: ldfun = cpu_ldl_super; break;
default:
- case 2: ldfun = ldl_user; break;
+ case 2: ldfun = cpu_ldl_user; break;
}
#endif
target_ulong i;
for (i = 0; i < base_reglist; i++) {
- env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
+ env->active_tc.gpr[multiple_regs[i]] = (target_long)ldfun(env, addr);
addr += 4;
}
}
if (do_r31) {
- env->active_tc.gpr[31] = (target_long) ldfun(addr);
+ env->active_tc.gpr[31] = (target_long)ldfun(env, addr);
}
}
-void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
+void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
{
target_ulong base_reglist = reglist & 0xf;
target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
-#define stfun stl_raw
+#define stfun(env, addr, val) stl_raw(addr, val)
#else
- void (*stfun)(target_ulong, uint32_t);
+ void (*stfun)(CPUMIPSState *env, target_ulong, uint32_t);
switch (mem_idx)
{
- case 0: stfun = stl_kernel; break;
- case 1: stfun = stl_super; break;
+ case 0: stfun = cpu_stl_kernel; break;
+ case 1: stfun = cpu_stl_super; break;
default:
- case 2: stfun = stl_user; break;
+ case 2: stfun = cpu_stl_user; break;
}
#endif
target_ulong i;
for (i = 0; i < base_reglist; i++) {
- stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
+ stfun(env, addr, env->active_tc.gpr[multiple_regs[i]]);
addr += 4;
}
}
if (do_r31) {
- stfun(addr, env->active_tc.gpr[31]);
+ stfun(env, addr, env->active_tc.gpr[31]);
}
}
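
helper_lwm/helper_swm pick their memory accessor once per call: the user-only build turns ldfun/stfun into macros that simply ignore env, while the softmmu build selects a per-privilege function pointer from mem_idx and reuses it for the whole register list. Here is a standalone sketch of that dispatch; the accessor names and the byte-array "RAM" are invented for illustration, not QEMU APIs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t ram[64]; } ToyCPU;

static uint32_t ld_kernel(ToyCPU *cpu, unsigned addr)
{
    uint32_t v;
    memcpy(&v, &cpu->ram[addr], sizeof(v));   /* host-endian load from toy RAM */
    return v;
}
/* In a real softmmu these would go through different TLB indexes;
 * here they only differ by name. */
static uint32_t ld_super(ToyCPU *cpu, unsigned addr) { return ld_kernel(cpu, addr); }
static uint32_t ld_user(ToyCPU *cpu, unsigned addr)  { return ld_kernel(cpu, addr); }

static uint32_t load_for_mode(ToyCPU *cpu, unsigned addr, int mem_idx)
{
    uint32_t (*ldfun)(ToyCPU *, unsigned);

    switch (mem_idx) {
    case 0:  ldfun = ld_kernel; break;        /* kernel */
    case 1:  ldfun = ld_super;  break;        /* supervisor */
    default:
    case 2:  ldfun = ld_user;   break;        /* user */
    }
    return ldfun(cpu, addr);                  /* CPU state passed explicitly */
}

int main(void)
{
    ToyCPU cpu = { .ram = { 0x44, 0x33, 0x22, 0x11 } };
    printf("0x%08x\n", load_for_mode(&cpu, 0, 2));
    return 0;
}
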
#if defined(TARGET_MIPS64)
-void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
+void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
{
target_ulong base_reglist = reglist & 0xf;
target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
-#define ldfun ldq_raw
+#define ldfun(env, addr) ldq_raw(addr)
#else
- uint64_t (*ldfun)(target_ulong);
+ uint64_t (*ldfun)(CPUMIPSState *env, target_ulong);
switch (mem_idx)
{
- case 0: ldfun = ldq_kernel; break;
- case 1: ldfun = ldq_super; break;
+ case 0: ldfun = cpu_ldq_kernel; break;
+ case 1: ldfun = cpu_ldq_super; break;
default:
- case 2: ldfun = ldq_user; break;
+ case 2: ldfun = cpu_ldq_user; break;
}
#endif
target_ulong i;
for (i = 0; i < base_reglist; i++) {
- env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
+ env->active_tc.gpr[multiple_regs[i]] = ldfun(env, addr);
addr += 8;
}
}
if (do_r31) {
- env->active_tc.gpr[31] = ldfun(addr);
+ env->active_tc.gpr[31] = ldfun(env, addr);
}
}
-void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
+void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
{
target_ulong base_reglist = reglist & 0xf;
target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
-#define stfun stq_raw
+#define stfun(env, addr, val) stq_raw(addr, val)
#else
- void (*stfun)(target_ulong, uint64_t);
+ void (*stfun)(CPUMIPSState *env, target_ulong, uint64_t);
switch (mem_idx)
{
- case 0: stfun = stq_kernel; break;
- case 1: stfun = stq_super; break;
+ case 0: stfun = cpu_stq_kernel; break;
+ case 1: stfun = cpu_stq_super; break;
default:
- case 2: stfun = stq_user; break;
+ case 2: stfun = cpu_stq_user; break;
}
#endif
target_ulong i;
for (i = 0; i < base_reglist; i++) {
- stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
+ stfun(env, addr, env->active_tc.gpr[multiple_regs[i]]);
addr += 8;
}
}
if (do_r31) {
- stfun(addr, env->active_tc.gpr[31]);
+ stfun(env, addr, env->active_tc.gpr[31]);
}
}
#endif
FIXME: This code assumes that all VPEs have the same number of TCs,
which depends on runtime setup. Can probably be fixed by
walking the list of CPUMIPSStates. */
-static CPUMIPSState *mips_cpu_map_tc(int *tc)
+static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
CPUMIPSState *other;
int vpe_idx, nr_threads = env->nr_threads;
These helper calls synchronize the regs for a given cpu. */
/* Called for updates to CP0_Status. */
-static void sync_c0_status(CPUMIPSState *cpu, int tc)
+static void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
int32_t tcstatus, *tcst;
uint32_t v = cpu->CP0_Status;
}
/* Called for updates to CP0_TCStatus. */
-static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, target_ulong v)
+static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
+ target_ulong v)
{
uint32_t status;
uint32_t tcu, tmx, tasid, tksu;
}
/* CP0 helpers */
-target_ulong helper_mfc0_mvpcontrol (void)
+target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
return env->mvp->CP0_MVPControl;
}
-target_ulong helper_mfc0_mvpconf0 (void)
+target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
return env->mvp->CP0_MVPConf0;
}
-target_ulong helper_mfc0_mvpconf1 (void)
+target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
return env->mvp->CP0_MVPConf1;
}
-target_ulong helper_mfc0_random (void)
+target_ulong helper_mfc0_random(CPUMIPSState *env)
{
return (int32_t)cpu_mips_get_random(env);
}
-target_ulong helper_mfc0_tcstatus (void)
+target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
return env->active_tc.CP0_TCStatus;
}
-target_ulong helper_mftc0_tcstatus(void)
+target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.CP0_TCStatus;
return other->tcs[other_tc].CP0_TCStatus;
}
-target_ulong helper_mfc0_tcbind (void)
+target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
return env->active_tc.CP0_TCBind;
}
-target_ulong helper_mftc0_tcbind(void)
+target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.CP0_TCBind;
return other->tcs[other_tc].CP0_TCBind;
}
-target_ulong helper_mfc0_tcrestart (void)
+target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
return env->active_tc.PC;
}
-target_ulong helper_mftc0_tcrestart(void)
+target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.PC;
return other->tcs[other_tc].PC;
}
-target_ulong helper_mfc0_tchalt (void)
+target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
return env->active_tc.CP0_TCHalt;
}
-target_ulong helper_mftc0_tchalt(void)
+target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.CP0_TCHalt;
return other->tcs[other_tc].CP0_TCHalt;
}
-target_ulong helper_mfc0_tccontext (void)
+target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
return env->active_tc.CP0_TCContext;
}
-target_ulong helper_mftc0_tccontext(void)
+target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.CP0_TCContext;
return other->tcs[other_tc].CP0_TCContext;
}
-target_ulong helper_mfc0_tcschedule (void)
+target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
return env->active_tc.CP0_TCSchedule;
}
-target_ulong helper_mftc0_tcschedule(void)
+target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.CP0_TCSchedule;
return other->tcs[other_tc].CP0_TCSchedule;
}
-target_ulong helper_mfc0_tcschefback (void)
+target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
return env->active_tc.CP0_TCScheFBack;
}
-target_ulong helper_mftc0_tcschefback(void)
+target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.CP0_TCScheFBack;
return other->tcs[other_tc].CP0_TCScheFBack;
}
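
Each mftc0/mttc0 helper above follows the same recipe: take the target TC index from CP0_VPEControl, map it to the CPUMIPSState that owns it, then touch either the live active_tc copy (if that TC is currently running there) or the parked tcs[] slot. A compact sketch of the "active copy versus saved slot" selection on a made-up structure; the type and field names are hypothetical.

#include <stdint.h>
#include <stdio.h>

enum { NR_TCS = 4 };

typedef struct { uint32_t tcstatus; } TCState;

typedef struct {
    int     current_tc;        /* TC whose state is live in active_tc */
    TCState active_tc;         /* live copy of the running TC */
    TCState tcs[NR_TCS];       /* saved copies of every TC */
} ToyCPU;

static uint32_t read_other_tcstatus(ToyCPU *cpu, int other_tc)
{
    /* The running TC's registers live in active_tc; everything else
     * is read from the saved array. */
    if (other_tc == cpu->current_tc) {
        return cpu->active_tc.tcstatus;
    }
    return cpu->tcs[other_tc].tcstatus;
}

int main(void)
{
    ToyCPU cpu = { .current_tc = 1 };
    cpu.active_tc.tcstatus = 0xaaaa;   /* TC 1 is running */
    cpu.tcs[2].tcstatus    = 0xbbbb;   /* TC 2 is parked  */
    printf("%x %x\n", read_other_tcstatus(&cpu, 1), read_other_tcstatus(&cpu, 2));
    return 0;
}
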
-target_ulong helper_mfc0_count (void)
+target_ulong helper_mfc0_count(CPUMIPSState *env)
{
return (int32_t)cpu_mips_get_count(env);
}
-target_ulong helper_mftc0_entryhi(void)
+target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
return other->CP0_EntryHi;
}
-target_ulong helper_mftc0_cause(void)
+target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
int32_t tccause;
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc) {
tccause = other->CP0_Cause;
return tccause;
}
-target_ulong helper_mftc0_status(void)
+target_ulong helper_mftc0_status(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
return other->CP0_Status;
}
-target_ulong helper_mfc0_lladdr (void)
+target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}
-target_ulong helper_mfc0_watchlo (uint32_t sel)
+target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
return (int32_t)env->CP0_WatchLo[sel];
}
-target_ulong helper_mfc0_watchhi (uint32_t sel)
+target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
return env->CP0_WatchHi[sel];
}
-target_ulong helper_mfc0_debug (void)
+target_ulong helper_mfc0_debug(CPUMIPSState *env)
{
target_ulong t0 = env->CP0_Debug;
if (env->hflags & MIPS_HFLAG_DM)
return t0;
}
-target_ulong helper_mftc0_debug(void)
+target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
int32_t tcstatus;
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
tcstatus = other->active_tc.CP0_Debug_tcstatus;
}
#if defined(TARGET_MIPS64)
-target_ulong helper_dmfc0_tcrestart (void)
+target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
return env->active_tc.PC;
}
-target_ulong helper_dmfc0_tchalt (void)
+target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
return env->active_tc.CP0_TCHalt;
}
-target_ulong helper_dmfc0_tccontext (void)
+target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
return env->active_tc.CP0_TCContext;
}
-target_ulong helper_dmfc0_tcschedule (void)
+target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
return env->active_tc.CP0_TCSchedule;
}
-target_ulong helper_dmfc0_tcschefback (void)
+target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
return env->active_tc.CP0_TCScheFBack;
}
-target_ulong helper_dmfc0_lladdr (void)
+target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
return env->lladdr >> env->CP0_LLAddr_shift;
}
-target_ulong helper_dmfc0_watchlo (uint32_t sel)
+target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
-void helper_mtc0_index (target_ulong arg1)
+void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
int num = 1;
unsigned int tmp = env->tlb->nb_tlb;
env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}
-void helper_mtc0_mvpcontrol (target_ulong arg1)
+void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask = 0;
uint32_t newval;
env->mvp->CP0_MVPControl = newval;
}
-void helper_mtc0_vpecontrol (target_ulong arg1)
+void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask;
uint32_t newval;
env->CP0_VPEControl = newval;
}
-void helper_mttc0_vpecontrol(target_ulong arg1)
+void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
uint32_t mask;
uint32_t newval;
other->CP0_VPEControl = newval;
}
-target_ulong helper_mftc0_vpecontrol(void)
+target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
/* FIXME: Mask away return zero on read bits. */
return other->CP0_VPEControl;
}
-target_ulong helper_mftc0_vpeconf0(void)
+target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
return other->CP0_VPEConf0;
}
-void helper_mtc0_vpeconf0 (target_ulong arg1)
+void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask = 0;
uint32_t newval;
env->CP0_VPEConf0 = newval;
}
-void helper_mttc0_vpeconf0(target_ulong arg1)
+void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
uint32_t mask = 0;
uint32_t newval;
other->CP0_VPEConf0 = newval;
}
-void helper_mtc0_vpeconf1 (target_ulong arg1)
+void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask = 0;
uint32_t newval;
env->CP0_VPEConf1 = newval;
}
-void helper_mtc0_yqmask (target_ulong arg1)
+void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
/* Yield qualifier inputs not implemented. */
env->CP0_YQMask = 0x00000000;
}
-void helper_mtc0_vpeopt (target_ulong arg1)
+void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
-void helper_mtc0_entrylo0 (target_ulong arg1)
+void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
/* Large physaddr (PABITS) not implemented */
/* 1k pages not implemented */
env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}
-void helper_mtc0_tcstatus (target_ulong arg1)
+void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask = env->CP0_TCStatus_rw_bitmask;
uint32_t newval;
sync_c0_tcstatus(env, env->current_tc, newval);
}
-void helper_mttc0_tcstatus (target_ulong arg1)
+void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.CP0_TCStatus = arg1;
sync_c0_tcstatus(other, other_tc, arg1);
}
-void helper_mtc0_tcbind (target_ulong arg1)
+void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask = (1 << CP0TCBd_TBE);
uint32_t newval;
env->active_tc.CP0_TCBind = newval;
}
-void helper_mttc0_tcbind (target_ulong arg1)
+void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
uint32_t mask = (1 << CP0TCBd_TBE);
uint32_t newval;
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
mask |= (1 << CP0TCBd_CurVPE);
}
}
-void helper_mtc0_tcrestart (target_ulong arg1)
+void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
env->active_tc.PC = arg1;
env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
/* MIPS16 not implemented. */
}
-void helper_mttc0_tcrestart (target_ulong arg1)
+void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc) {
other->active_tc.PC = arg1;
}
}
-void helper_mtc0_tchalt (target_ulong arg1)
+void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
env->active_tc.CP0_TCHalt = arg1 & 0x1;
}
}
-void helper_mttc0_tchalt (target_ulong arg1)
+void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
// TODO: Halt TC / Restart (if allocated+active) TC.
}
}
-void helper_mtc0_tccontext (target_ulong arg1)
+void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
env->active_tc.CP0_TCContext = arg1;
}
-void helper_mttc0_tccontext (target_ulong arg1)
+void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.CP0_TCContext = arg1;
other->tcs[other_tc].CP0_TCContext = arg1;
}
-void helper_mtc0_tcschedule (target_ulong arg1)
+void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
env->active_tc.CP0_TCSchedule = arg1;
}
-void helper_mttc0_tcschedule (target_ulong arg1)
+void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.CP0_TCSchedule = arg1;
other->tcs[other_tc].CP0_TCSchedule = arg1;
}
-void helper_mtc0_tcschefback (target_ulong arg1)
+void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
env->active_tc.CP0_TCScheFBack = arg1;
}
-void helper_mttc0_tcschefback (target_ulong arg1)
+void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.CP0_TCScheFBack = arg1;
other->tcs[other_tc].CP0_TCScheFBack = arg1;
}
-void helper_mtc0_entrylo1 (target_ulong arg1)
+void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
/* Large physaddr (PABITS) not implemented */
/* 1k pages not implemented */
env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}
-void helper_mtc0_context (target_ulong arg1)
+void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}
-void helper_mtc0_pagemask (target_ulong arg1)
+void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
/* 1k pages not implemented */
env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}
-void helper_mtc0_pagegrain (target_ulong arg1)
+void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
/* SmartMIPS not implemented */
/* Large physaddr (PABITS) not implemented */
env->CP0_PageGrain = 0;
}
-void helper_mtc0_wired (target_ulong arg1)
+void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}
-void helper_mtc0_srsconf0 (target_ulong arg1)
+void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}
-void helper_mtc0_srsconf1 (target_ulong arg1)
+void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}
-void helper_mtc0_srsconf2 (target_ulong arg1)
+void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}
-void helper_mtc0_srsconf3 (target_ulong arg1)
+void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}
-void helper_mtc0_srsconf4 (target_ulong arg1)
+void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}
-void helper_mtc0_hwrena (target_ulong arg1)
+void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_HWREna = arg1 & 0x0000000F;
}
-void helper_mtc0_count (target_ulong arg1)
+void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
cpu_mips_store_count(env, arg1);
}
-void helper_mtc0_entryhi (target_ulong arg1)
+void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
target_ulong old, val;
cpu_mips_tlb_flush(env, 1);
}
-void helper_mttc0_entryhi(target_ulong arg1)
+void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
other->CP0_EntryHi = arg1;
sync_c0_entryhi(other, other_tc);
}
-void helper_mtc0_compare (target_ulong arg1)
+void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
cpu_mips_store_compare(env, arg1);
}
-void helper_mtc0_status (target_ulong arg1)
+void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
uint32_t val, old;
uint32_t mask = env->CP0_Status_rw_bitmask;
old = env->CP0_Status;
env->CP0_Status = (env->CP0_Status & ~mask) | val;
if (env->CP0_Config3 & (1 << CP0C3_MT)) {
- sync_c0_status(env, env->current_tc);
+ sync_c0_status(env, env, env->current_tc);
} else {
compute_hflags(env);
}
}
}
-void helper_mttc0_status(target_ulong arg1)
+void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
other->CP0_Status = arg1 & ~0xf1000018;
- sync_c0_status(other, other_tc);
+ sync_c0_status(env, other, other_tc);
}
-void helper_mtc0_intctl (target_ulong arg1)
+void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
/* vectored interrupts not implemented, no performance counters. */
env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}
-void helper_mtc0_srsctl (target_ulong arg1)
+void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
}
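
Most of the mtc0 helpers are masked read-modify-write updates: only the architecturally writable bits of the register change and everything else is preserved, as in mtc0_intctl and mtc0_srsctl above. A one-function sketch of the idiom; the mask value is just an example.

#include <stdint.h>
#include <stdio.h>

/* Keep the bits outside `mask` untouched and take the bits inside
 * `mask` from the newly written value. */
static uint32_t masked_write(uint32_t old, uint32_t new_val, uint32_t mask)
{
    return (old & ~mask) | (new_val & mask);
}

int main(void)
{
    uint32_t reg = 0xffff0000;
    reg = masked_write(reg, 0x000003e0, 0x000003e0);  /* IntCtl-style update */
    printf("0x%08x\n", reg);                          /* prints 0xffff03e0 */
    return 0;
}
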
-void helper_mtc0_cause(target_ulong arg1)
+void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
mtc0_cause(env, arg1);
}
-void helper_mttc0_cause(target_ulong arg1)
+void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
mtc0_cause(other, arg1);
}
-target_ulong helper_mftc0_epc(void)
+target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
return other->CP0_EPC;
}
-target_ulong helper_mftc0_ebase(void)
+target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
return other->CP0_EBase;
}
-void helper_mtc0_ebase (target_ulong arg1)
+void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
/* vectored interrupts not implemented */
env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
-void helper_mttc0_ebase(target_ulong arg1)
+void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
-target_ulong helper_mftc0_configx(target_ulong idx)
+target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
switch (idx) {
case 0: return other->CP0_Config0;
return 0;
}
-void helper_mtc0_config0 (target_ulong arg1)
+void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}
-void helper_mtc0_config2 (target_ulong arg1)
+void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
/* tertiary/secondary caches not implemented */
env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
-void helper_mtc0_lladdr (target_ulong arg1)
+void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
target_long mask = env->CP0_LLAddr_rw_bitmask;
arg1 = arg1 << env->CP0_LLAddr_shift;
env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}
-void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
+void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
/* Watch exceptions for instructions, data loads, data stores
not implemented. */
env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}
-void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
+void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
-void helper_mtc0_xcontext (target_ulong arg1)
+void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}
-void helper_mtc0_framemask (target_ulong arg1)
+void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_Framemask = arg1; /* XXX */
}
-void helper_mtc0_debug (target_ulong arg1)
+void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
if (arg1 & (1 << CP0DB_DM))
env->hflags &= ~MIPS_HFLAG_DM;
}
-void helper_mttc0_debug(target_ulong arg1)
+void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
/* XXX: Might be wrong, check with EJTAG spec. */
if (other_tc == other->current_tc)
(arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
-void helper_mtc0_performance0 (target_ulong arg1)
+void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_Performance0 = arg1 & 0x000007ff;
}
-void helper_mtc0_taglo (target_ulong arg1)
+void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}
-void helper_mtc0_datalo (target_ulong arg1)
+void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_DataLo = arg1; /* XXX */
}
-void helper_mtc0_taghi (target_ulong arg1)
+void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_TagHi = arg1; /* XXX */
}
-void helper_mtc0_datahi (target_ulong arg1)
+void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
env->CP0_DataHi = arg1; /* XXX */
}
/* MIPS MT functions */
-target_ulong helper_mftgpr(uint32_t sel)
+target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.gpr[sel];
return other->tcs[other_tc].gpr[sel];
}
-target_ulong helper_mftlo(uint32_t sel)
+target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.LO[sel];
return other->tcs[other_tc].LO[sel];
}
-target_ulong helper_mfthi(uint32_t sel)
+target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.HI[sel];
return other->tcs[other_tc].HI[sel];
}
-target_ulong helper_mftacx(uint32_t sel)
+target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.ACX[sel];
return other->tcs[other_tc].ACX[sel];
}
-target_ulong helper_mftdsp(void)
+target_ulong helper_mftdsp(CPUMIPSState *env)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
return other->active_tc.DSPControl;
return other->tcs[other_tc].DSPControl;
}
-void helper_mttgpr(target_ulong arg1, uint32_t sel)
+void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.gpr[sel] = arg1;
other->tcs[other_tc].gpr[sel] = arg1;
}
-void helper_mttlo(target_ulong arg1, uint32_t sel)
+void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.LO[sel] = arg1;
other->tcs[other_tc].LO[sel] = arg1;
}
-void helper_mtthi(target_ulong arg1, uint32_t sel)
+void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.HI[sel] = arg1;
other->tcs[other_tc].HI[sel] = arg1;
}
-void helper_mttacx(target_ulong arg1, uint32_t sel)
+void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.ACX[sel] = arg1;
other->tcs[other_tc].ACX[sel] = arg1;
}
-void helper_mttdsp(target_ulong arg1)
+void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
if (other_tc == other->current_tc)
other->active_tc.DSPControl = arg1;
return 0;
}
-target_ulong helper_dvpe(void)
+target_ulong helper_dvpe(CPUMIPSState *env)
{
CPUMIPSState *other_cpu = first_cpu;
target_ulong prev = env->mvp->CP0_MVPControl;
return prev;
}
-target_ulong helper_evpe(void)
+target_ulong helper_evpe(CPUMIPSState *env)
{
CPUMIPSState *other_cpu = first_cpu;
target_ulong prev = env->mvp->CP0_MVPControl;
// TODO: store to TC register
}
-target_ulong helper_yield(target_ulong arg)
+target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
target_long arg1 = arg;
env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
- helper_raise_exception(EXCP_THREAD);
+ helper_raise_exception(env, EXCP_THREAD);
}
}
} else if (arg1 == 0) {
if (0 /* TODO: TC underflow */) {
env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
- helper_raise_exception(EXCP_THREAD);
+ helper_raise_exception(env, EXCP_THREAD);
} else {
// TODO: Deallocate TC
}
/* Yield qualifier inputs not implemented. */
env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
- helper_raise_exception(EXCP_THREAD);
+ helper_raise_exception(env, EXCP_THREAD);
}
return env->CP0_YQMask;
}
}
}
-static void r4k_fill_tlb (int idx)
+static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
r4k_tlb_t *tlb;
tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
-void r4k_helper_tlbwi (void)
+void r4k_helper_tlbwi(CPUMIPSState *env)
{
int idx;
r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
r4k_invalidate_tlb(env, idx, 0);
- r4k_fill_tlb(idx);
+ r4k_fill_tlb(env, idx);
}
-void r4k_helper_tlbwr (void)
+void r4k_helper_tlbwr(CPUMIPSState *env)
{
int r = cpu_mips_get_random(env);
r4k_invalidate_tlb(env, r, 1);
- r4k_fill_tlb(r);
+ r4k_fill_tlb(env, r);
}
-void r4k_helper_tlbp (void)
+void r4k_helper_tlbp(CPUMIPSState *env)
{
r4k_tlb_t *tlb;
target_ulong mask;
}
}
-void r4k_helper_tlbr (void)
+void r4k_helper_tlbr(CPUMIPSState *env)
{
r4k_tlb_t *tlb;
uint8_t ASID;
(tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
-void helper_tlbwi(void)
+void helper_tlbwi(CPUMIPSState *env)
{
- env->tlb->helper_tlbwi();
+ env->tlb->helper_tlbwi(env);
}
-void helper_tlbwr(void)
+void helper_tlbwr(CPUMIPSState *env)
{
- env->tlb->helper_tlbwr();
+ env->tlb->helper_tlbwr(env);
}
-void helper_tlbp(void)
+void helper_tlbp(CPUMIPSState *env)
{
- env->tlb->helper_tlbp();
+ env->tlb->helper_tlbp(env);
}
-void helper_tlbr(void)
+void helper_tlbr(CPUMIPSState *env)
{
- env->tlb->helper_tlbr();
+ env->tlb->helper_tlbr(env);
}
/* Specials */
-target_ulong helper_di (void)
+target_ulong helper_di(CPUMIPSState *env)
{
target_ulong t0 = env->CP0_Status;
return t0;
}
-target_ulong helper_ei (void)
+target_ulong helper_ei(CPUMIPSState *env)
{
target_ulong t0 = env->CP0_Status;
return t0;
}
-static void debug_pre_eret (void)
+static void debug_pre_eret(CPUMIPSState *env)
{
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
}
}
-static void debug_post_eret (void)
+static void debug_post_eret(CPUMIPSState *env)
{
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
}
}
-static void set_pc (target_ulong error_pc)
+static void set_pc(CPUMIPSState *env, target_ulong error_pc)
{
env->active_tc.PC = error_pc & ~(target_ulong)1;
if (error_pc & 1) {
}
}
-void helper_eret (void)
+void helper_eret(CPUMIPSState *env)
{
- debug_pre_eret();
+ debug_pre_eret(env);
if (env->CP0_Status & (1 << CP0St_ERL)) {
- set_pc(env->CP0_ErrorEPC);
+ set_pc(env, env->CP0_ErrorEPC);
env->CP0_Status &= ~(1 << CP0St_ERL);
} else {
- set_pc(env->CP0_EPC);
+ set_pc(env, env->CP0_EPC);
env->CP0_Status &= ~(1 << CP0St_EXL);
}
compute_hflags(env);
- debug_post_eret();
+ debug_post_eret(env);
env->lladdr = 1;
}
-void helper_deret (void)
+void helper_deret(CPUMIPSState *env)
{
- debug_pre_eret();
- set_pc(env->CP0_DEPC);
+ debug_pre_eret(env);
+ set_pc(env, env->CP0_DEPC);
env->hflags &= MIPS_HFLAG_DM;
compute_hflags(env);
- debug_post_eret();
+ debug_post_eret(env);
env->lladdr = 1;
}
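
helper_eret and helper_deret both go through set_pc(), where the low bit of the return address doubles as the ISA-mode selector: it is stripped from the PC and turned into a mode flag. A tiny sketch of that split with invented names (ToyState/isa16 are not QEMU fields).

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t pc; int isa16; } ToyState;

static void toy_set_pc(ToyState *s, uint32_t error_pc)
{
    s->pc    = error_pc & ~(uint32_t)1;   /* real PC, bit 0 cleared */
    s->isa16 = error_pc & 1;              /* bit 0 selects the compressed ISA mode */
}

int main(void)
{
    ToyState s;
    toy_set_pc(&s, 0x80001001);
    printf("pc=0x%08x isa16=%d\n", s.pc, s.isa16);
    return 0;
}
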
#endif /* !CONFIG_USER_ONLY */
-target_ulong helper_rdhwr_cpunum(void)
+target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
if ((env->hflags & MIPS_HFLAG_CP0) ||
(env->CP0_HWREna & (1 << 0)))
return env->CP0_EBase & 0x3ff;
else
- helper_raise_exception(EXCP_RI);
+ helper_raise_exception(env, EXCP_RI);
return 0;
}
-target_ulong helper_rdhwr_synci_step(void)
+target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
if ((env->hflags & MIPS_HFLAG_CP0) ||
(env->CP0_HWREna & (1 << 1)))
return env->SYNCI_Step;
else
- helper_raise_exception(EXCP_RI);
+ helper_raise_exception(env, EXCP_RI);
return 0;
}
-target_ulong helper_rdhwr_cc(void)
+target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
if ((env->hflags & MIPS_HFLAG_CP0) ||
(env->CP0_HWREna & (1 << 2)))
return env->CP0_Count;
else
- helper_raise_exception(EXCP_RI);
+ helper_raise_exception(env, EXCP_RI);
return 0;
}
-target_ulong helper_rdhwr_ccres(void)
+target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
if ((env->hflags & MIPS_HFLAG_CP0) ||
(env->CP0_HWREna & (1 << 3)))
return env->CCRes;
else
- helper_raise_exception(EXCP_RI);
+ helper_raise_exception(env, EXCP_RI);
return 0;
}
-void helper_pmon (int function)
+void helper_pmon(CPUMIPSState *env, int function)
{
function /= 2;
switch (function) {
}
}
-void helper_wait (void)
+void helper_wait(CPUMIPSState *env)
{
env->halted = 1;
cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
- helper_raise_exception(EXCP_HLT);
+ helper_raise_exception(env, EXCP_HLT);
}
#if !defined(CONFIG_USER_ONLY)
-static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
+static void QEMU_NORETURN do_unaligned_access(CPUMIPSState *env,
+ target_ulong addr, int is_write,
int is_user, uintptr_t retaddr);
#define MMUSUFFIX _mmu
#define SHIFT 3
#include "softmmu_template.h"
-static void do_unaligned_access(target_ulong addr, int is_write,
- int is_user, uintptr_t retaddr)
+static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
+ int is_write, int is_user, uintptr_t retaddr)
{
env->CP0_BadVAddr = addr;
- do_restore_state (retaddr);
- helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
+ do_restore_state(env, retaddr);
+ helper_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
-void tlb_fill(CPUMIPSState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUMIPSState *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
TranslationBlock *tb;
- CPUMIPSState *saved_env;
int ret;
- saved_env = env;
- env = env1;
ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (ret) {
if (retaddr) {
cpu_restore_state(tb, env, retaddr);
}
}
- helper_raise_exception_err(env->exception_index, env->error_code);
+ helper_raise_exception_err(env, env->exception_index, env->error_code);
}
- env = saved_env;
}
-void cpu_unassigned_access(CPUMIPSState *env1, target_phys_addr_t addr,
+void cpu_unassigned_access(CPUMIPSState *env, target_phys_addr_t addr,
int is_write, int is_exec, int unused, int size)
{
- env = env1;
-
if (is_exec)
- helper_raise_exception(EXCP_IBE);
+ helper_raise_exception(env, EXCP_IBE);
else
- helper_raise_exception(EXCP_DBE);
+ helper_raise_exception(env, EXCP_DBE);
}
#endif /* !CONFIG_USER_ONLY */
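
The tlb_fill and cpu_unassigned_access hunks also delete the old saved_env shuffle: once the CPU state arrives as an ordinary parameter there is no global left to swap and restore around reentrant calls. A minimal sketch contrasting the two styles; Ctx and the function names are invented, and the real code of course threads CPUMIPSState instead.

#include <stdio.h>

typedef struct { int id; } Ctx;

/* Old style: a global context that every callee implicitly uses.  Any
 * reentrant call has to save and restore it by hand. */
static Ctx *cur_ctx;

static void with_global(Ctx *c)
{
    Ctx *saved = cur_ctx;      /* the "saved_env" dance */
    cur_ctx = c;
    printf("global sees ctx %d\n", cur_ctx->id);
    cur_ctx = saved;
}

/* New style: the context is an ordinary first parameter, so there is
 * nothing to save or restore. */
static void with_param(Ctx *c)
{
    printf("param sees ctx %d\n", c->id);
}

int main(void)
{
    Ctx a = { 1 }, b = { 2 };
    with_global(&a);
    with_param(&b);
    return 0;
}
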
#define RESTORE_FLUSH_MODE \
set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
-target_ulong helper_cfc1 (uint32_t reg)
+target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
{
target_ulong arg1;
return arg1;
}
-void helper_ctc1 (target_ulong arg1, uint32_t reg)
+void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t reg)
{
switch(reg) {
case 25:
RESTORE_FLUSH_MODE;
set_float_exception_flags(0, &env->active_fpu.fp_status);
if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
- helper_raise_exception(EXCP_FPE);
+ helper_raise_exception(env, EXCP_FPE);
}
static inline int ieee_ex_to_mips(int xcpt)
return ret;
}
-static inline void update_fcr31(void)
+static inline void update_fcr31(CPUMIPSState *env)
{
int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
- helper_raise_exception(EXCP_FPE);
+ helper_raise_exception(env, EXCP_FPE);
else
UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
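
update_fcr31() is the shared FP epilogue: it reads the softfloat exception flags left behind by the preceding operation, converts them to MIPS cause bits, and raises EXCP_FPE when an enabled exception fired. The same clear/compute/inspect/map shape can be shown with the standard C floating-point environment; this sketch uses <fenv.h> rather than QEMU's softfloat, and the cause-bit values are made up.

#include <fenv.h>
#include <math.h>
#include <stdio.h>

#pragma STDC FENV_ACCESS ON

enum { CAUSE_INVALID = 1 << 0, CAUSE_DIV0 = 1 << 1, CAUSE_OVERFLOW = 1 << 2 };

/* Map host IEEE flags to made-up cause bits, in the spirit of ieee_ex_to_mips. */
static int flags_to_cause(void)
{
    int cause = 0;
    if (fetestexcept(FE_INVALID))   cause |= CAUSE_INVALID;
    if (fetestexcept(FE_DIVBYZERO)) cause |= CAUSE_DIV0;
    if (fetestexcept(FE_OVERFLOW))  cause |= CAUSE_OVERFLOW;
    return cause;
}

int main(void)
{
    feclearexcept(FE_ALL_EXCEPT);         /* like set_float_exception_flags(0, ...) */
    volatile double neg = -1.0;           /* volatile defeats constant folding */
    double x = sqrt(neg);                 /* raises FE_INVALID */
    (void)x;
    printf("cause bits: %#x\n", flags_to_cause());
    return 0;
}
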
paired single lower "pl", paired single upper "pu". */
/* unary operations, modifying fp status */
-uint64_t helper_float_sqrt_d(uint64_t fdt0)
+uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}
-uint32_t helper_float_sqrt_s(uint32_t fst0)
+uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
{
return float32_sqrt(fst0, &env->active_fpu.fp_status);
}
-uint64_t helper_float_cvtd_s(uint32_t fst0)
+uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint64_t helper_float_cvtd_w(uint32_t wt0)
+uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint64_t helper_float_cvtd_l(uint64_t dt0)
+uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint64_t helper_float_cvtl_d(uint64_t fdt0)
+uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint64_t helper_float_cvtl_s(uint32_t fst0)
+uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint64_t helper_float_cvtps_pw(uint64_t dt0)
+uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
{
uint32_t fst2;
uint32_t fsth2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
-uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
+uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
uint32_t wth2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
wt2 = FLOAT_SNAN32;
wth2 = FLOAT_SNAN32;
return ((uint64_t)wth2 << 32) | wt2;
}
-uint32_t helper_float_cvts_d(uint64_t fdt0)
+uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint32_t helper_float_cvts_w(uint32_t wt0)
+uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint32_t helper_float_cvts_l(uint64_t dt0)
+uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint32_t helper_float_cvts_pl(uint32_t wt0)
+uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
{
uint32_t wt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = wt0;
- update_fcr31();
+ update_fcr31(env);
return wt2;
}
-uint32_t helper_float_cvts_pu(uint32_t wth0)
+uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
{
uint32_t wt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = wth0;
- update_fcr31();
+ update_fcr31(env);
return wt2;
}
-uint32_t helper_float_cvtw_s(uint32_t fst0)
+uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint32_t helper_float_cvtw_d(uint64_t fdt0)
+uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint64_t helper_float_roundl_d(uint64_t fdt0)
+uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint64_t helper_float_roundl_s(uint32_t fst0)
+uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint32_t helper_float_roundw_d(uint64_t fdt0)
+uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint32_t helper_float_roundw_s(uint32_t fst0)
+uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint64_t helper_float_truncl_d(uint64_t fdt0)
+uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint64_t helper_float_truncl_s(uint32_t fst0)
+uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint32_t helper_float_truncw_d(uint64_t fdt0)
+uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint32_t helper_float_truncw_s(uint32_t fst0)
+uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint64_t helper_float_ceill_d(uint64_t fdt0)
+uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint64_t helper_float_ceill_s(uint32_t fst0)
+uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint32_t helper_float_ceilw_d(uint64_t fdt0)
+uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint32_t helper_float_ceilw_s(uint32_t fst0)
+uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint64_t helper_float_floorl_d(uint64_t fdt0)
+uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint64_t helper_float_floorl_s(uint32_t fst0)
+uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
dt2 = FLOAT_SNAN64;
return dt2;
}
-uint32_t helper_float_floorw_d(uint64_t fdt0)
+uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
}
-uint32_t helper_float_floorw_s(uint32_t fst0)
+uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
RESTORE_ROUNDING_MODE;
- update_fcr31();
+ update_fcr31(env);
if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
wt2 = FLOAT_SNAN32;
return wt2;
#undef FLOAT_UNOP
/* MIPS specific unary operations */
-uint64_t helper_float_recip_d(uint64_t fdt0)
+uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint32_t helper_float_recip_s(uint32_t fst0)
+uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint64_t helper_float_rsqrt_d(uint64_t fdt0)
+uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint32_t helper_float_rsqrt_s(uint32_t fst0)
+uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint64_t helper_float_recip1_d(uint64_t fdt0)
+uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint32_t helper_float_recip1_s(uint32_t fst0)
+uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint64_t helper_float_recip1_ps(uint64_t fdt0)
+uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t fst2;
uint32_t fsth2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
-uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
+uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t fdt2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint32_t helper_float_rsqrt1_s(uint32_t fst0)
+uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t fst2;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
+uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t fst2;
uint32_t fsth2;
fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
-#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
+#define FLOAT_OP(name, p) void helper_float_##name##_##p(CPUMIPSState *env)
/* binary operations */
#define FLOAT_BINOP(name) \
-uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
+uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
{ \
uint64_t dt2; \
\
set_float_exception_flags(0, &env->active_fpu.fp_status); \
dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
- update_fcr31(); \
+ update_fcr31(env); \
if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
dt2 = FLOAT_QNAN64; \
return dt2; \
} \
\
-uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
+uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
+ uint32_t fst0, uint32_t fst1) \
{ \
uint32_t wt2; \
\
set_float_exception_flags(0, &env->active_fpu.fp_status); \
wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
- update_fcr31(); \
+ update_fcr31(env); \
if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
wt2 = FLOAT_QNAN32; \
return wt2; \
} \
\
-uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
+uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1) \
{ \
uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
uint32_t fsth0 = fdt0 >> 32; \
set_float_exception_flags(0, &env->active_fpu.fp_status); \
wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
- update_fcr31(); \
+ update_fcr31(env); \
if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
wt2 = FLOAT_QNAN32; \
wth2 = FLOAT_QNAN32; \
/* ternary operations */
#define FLOAT_TERNOP(name1, name2) \
-uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
- uint64_t fdt2) \
+uint64_t helper_float_ ## name1 ## name2 ## _d(CPUMIPSState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1, \
+ uint64_t fdt2) \
{ \
fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
} \
\
-uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
- uint32_t fst2) \
+uint32_t helper_float_ ## name1 ## name2 ## _s(CPUMIPSState *env, \
+ uint32_t fst0, \
+ uint32_t fst1, \
+ uint32_t fst2) \
{ \
fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
} \
\
-uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
- uint64_t fdt2) \
+uint64_t helper_float_ ## name1 ## name2 ## _ps(CPUMIPSState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1, \
+ uint64_t fdt2) \
{ \
uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
uint32_t fsth0 = fdt0 >> 32; \
/* negated ternary operations */
#define FLOAT_NTERNOP(name1, name2) \
-uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
- uint64_t fdt2) \
+uint64_t helper_float_n ## name1 ## name2 ## _d(CPUMIPSState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1, \
+ uint64_t fdt2) \
{ \
fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
return float64_chs(fdt2); \
} \
\
-uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
- uint32_t fst2) \
+uint32_t helper_float_n ## name1 ## name2 ## _s(CPUMIPSState *env, \
+ uint32_t fst0, \
+ uint32_t fst1, \
+ uint32_t fst2) \
{ \
fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
return float32_chs(fst2); \
} \
\
-uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
- uint64_t fdt2) \
+uint64_t helper_float_n ## name1 ## name2 ## _ps(CPUMIPSState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1, \
+ uint64_t fdt2) \
{ \
uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
uint32_t fsth0 = fdt0 >> 32; \
#undef FLOAT_NTERNOP
/* MIPS specific binary operations */
-uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
+uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
+uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
{
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
+uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
uint32_t fst0 = fdt0 & 0XFFFFFFFF;
uint32_t fsth0 = fdt0 >> 32;
fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
-uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
+uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
set_float_exception_flags(0, &env->active_fpu.fp_status);
fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
- update_fcr31();
+ update_fcr31(env);
return fdt2;
}
-uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
+uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
{
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
- update_fcr31();
+ update_fcr31(env);
return fst2;
}
-uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
+uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
uint32_t fst0 = fdt0 & 0XFFFFFFFF;
uint32_t fsth0 = fdt0 >> 32;
fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
-uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
+uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
{
uint32_t fst0 = fdt0 & 0XFFFFFFFF;
uint32_t fsth0 = fdt0 >> 32;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
-uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
+uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
{
uint32_t fst0 = fdt0 & 0XFFFFFFFF;
uint32_t fsth0 = fdt0 >> 32;
set_float_exception_flags(0, &env->active_fpu.fp_status);
fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
- update_fcr31();
+ update_fcr31(env);
return ((uint64_t)fsth2 << 32) | fst2;
}
/* compare operations */
#define FOP_COND_D(op, cond) \
-void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
+void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
{ \
int c; \
set_float_exception_flags(0, &env->active_fpu.fp_status); \
c = cond; \
- update_fcr31(); \
+ update_fcr31(env); \
if (c) \
SET_FP_COND(cc, env->active_fpu); \
else \
CLEAR_FP_COND(cc, env->active_fpu); \
} \
-void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
+void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
{ \
int c; \
set_float_exception_flags(0, &env->active_fpu.fp_status); \
fdt0 = float64_abs(fdt0); \
fdt1 = float64_abs(fdt1); \
c = cond; \
- update_fcr31(); \
+ update_fcr31(env); \
if (c) \
SET_FP_COND(cc, env->active_fpu); \
else \
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
#define FOP_COND_S(op, cond) \
-void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
+void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1, int cc) \
{ \
int c; \
set_float_exception_flags(0, &env->active_fpu.fp_status); \
c = cond; \
- update_fcr31(); \
+ update_fcr31(env); \
if (c) \
SET_FP_COND(cc, env->active_fpu); \
else \
CLEAR_FP_COND(cc, env->active_fpu); \
} \
-void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
+void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1, int cc) \
{ \
int c; \
set_float_exception_flags(0, &env->active_fpu.fp_status); \
fst0 = float32_abs(fst0); \
fst1 = float32_abs(fst1); \
c = cond; \
- update_fcr31(); \
+ update_fcr31(env); \
if (c) \
SET_FP_COND(cc, env->active_fpu); \
else \
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
#define FOP_COND_PS(op, condl, condh) \
-void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
+void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
{ \
uint32_t fst0, fsth0, fst1, fsth1; \
int ch, cl; \
fsth1 = fdt1 >> 32; \
cl = condl; \
ch = condh; \
- update_fcr31(); \
+ update_fcr31(env); \
if (cl) \
SET_FP_COND(cc, env->active_fpu); \
else \
else \
CLEAR_FP_COND(cc + 1, env->active_fpu); \
} \
-void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
+void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
{ \
uint32_t fst0, fsth0, fst1, fsth1; \
int ch, cl; \
fsth1 = float32_abs(fdt1 >> 32); \
cl = condl; \
ch = condh; \
- update_fcr31(); \
+ update_fcr31(env); \
if (cl) \
SET_FP_COND(cc, env->active_fpu); \
else \
#include "gen-icount.h"
-#define gen_helper_0i(name, arg) do { \
+#define gen_helper_0e0i(name, arg) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg); \
- gen_helper_##name(helper_tmp); \
+ gen_helper_##name(cpu_env, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
-#define gen_helper_1i(name, arg1, arg2) do { \
+#define gen_helper_0e1i(name, arg1, arg2) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
- gen_helper_##name(arg1, helper_tmp); \
+ gen_helper_##name(cpu_env, arg1, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
-#define gen_helper_2i(name, arg1, arg2, arg3) do { \
+#define gen_helper_1e0i(name, ret, arg1) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg1); \
+ gen_helper_##name(ret, cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_1e1i(name, ret, arg1, arg2) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
+ gen_helper_##name(ret, cpu_env, arg1, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
+ gen_helper_##name(cpu_env, arg1, arg2, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_1e2i(name, ret, arg1, arg2, arg3) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
- gen_helper_##name(arg1, arg2, helper_tmp); \
+ gen_helper_##name(ret, cpu_env, arg1, arg2, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
-#define gen_helper_3i(name, arg1, arg2, arg3, arg4) do { \
+#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) do { \
TCGv_i32 helper_tmp = tcg_const_i32(arg4); \
- gen_helper_##name(arg1, arg2, arg3, helper_tmp); \
+ gen_helper_##name(cpu_env, arg1, arg2, arg3, helper_tmp); \
tcg_temp_free_i32(helper_tmp); \
} while(0)
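The old gen_helper_<N>i wrappers passed every TCG value the same way; the new names split them as gen_helper_<R>e<A>i: R output temporaries, an implicit cpu_env, A TCGv inputs, and one trailing constant boxed into an i32 temporary (this reading of the names is inferred from the definitions above). That is also why the same old gen_helper_1i becomes gen_helper_1e0i for cfc1, where the TCGv is an output, but gen_helper_0e1i for ctc1, where it is an input. As a sketch, gen_helper_0e0i(raise_exception, excp) in generate_exception() below expands to roughly:

    do {
        TCGv_i32 helper_tmp = tcg_const_i32(excp);
        gen_helper_raise_exception(cpu_env, helper_tmp); /* helper now takes env first */
        tcg_temp_free_i32(helper_tmp);
    } while (0);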
TCGv_i32 texcp = tcg_const_i32(excp);
TCGv_i32 terr = tcg_const_i32(err);
save_cpu_state(ctx, 1);
- gen_helper_raise_exception_err(texcp, terr);
+ gen_helper_raise_exception_err(cpu_env, texcp, terr);
tcg_temp_free_i32(terr);
tcg_temp_free_i32(texcp);
}
generate_exception (DisasContext *ctx, int excp)
{
save_cpu_state(ctx, 1);
- gen_helper_0i(raise_exception, excp);
+ gen_helper_0e0i(raise_exception, excp);
}
/* Addresses computation */
gen_ldcmp_fpr##bits (ctx, fp0, fs); \
gen_ldcmp_fpr##bits (ctx, fp1, ft); \
switch (n) { \
- case 0: gen_helper_2i(cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); break;\
- case 1: gen_helper_2i(cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); break;\
- case 2: gen_helper_2i(cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); break;\
- case 3: gen_helper_2i(cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); break;\
- case 4: gen_helper_2i(cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); break;\
- case 5: gen_helper_2i(cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); break;\
- case 6: gen_helper_2i(cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); break;\
- case 7: gen_helper_2i(cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); break;\
- case 8: gen_helper_2i(cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); break;\
- case 9: gen_helper_2i(cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); break;\
- case 10: gen_helper_2i(cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); break;\
- case 11: gen_helper_2i(cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); break;\
- case 12: gen_helper_2i(cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); break;\
- case 13: gen_helper_2i(cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); break;\
- case 14: gen_helper_2i(cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); break;\
- case 15: gen_helper_2i(cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); break;\
+ case 0: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); break;\
+ case 1: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); break;\
+ case 2: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); break;\
+ case 3: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); break;\
+ case 4: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); break;\
+ case 5: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); break;\
+ case 6: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); break;\
+ case 7: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); break;\
+ case 8: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); break;\
+ case 9: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); break;\
+ case 10: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); break;\
+ case 11: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); break;\
+ case 12: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); break;\
+ case 13: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); break;\
+ case 14: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); break;\
+ case 15: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); break;\
default: abort(); \
} \
tcg_temp_free_i##bits (fp0); \
#define OP_LD_ATOMIC(insn,fname) \
static inline void op_ld_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
{ \
- gen_helper_2i(insn, ret, arg1, ctx->mem_idx); \
+ gen_helper_1e1i(insn, ret, arg1, ctx->mem_idx); \
}
#endif
OP_LD_ATOMIC(ll,ld32s);
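As a concrete instance, the helper-based OP_LD_ATOMIC variant above, instantiated as OP_LD_ATOMIC(ll, ld32s), now generates roughly the following (a sketch of the preprocessor output, assuming the gen_helper_1e1i definition shown earlier):

    static inline void op_ld_ll(TCGv ret, TCGv arg1, DisasContext *ctx)
    {
        /* box mem_idx into a constant i32 and pass cpu_env explicitly,
           so helper_ll() receives (env, address, mem_idx) */
        TCGv_i32 helper_tmp = tcg_const_i32(ctx->mem_idx);
        gen_helper_ll(ret, cpu_env, arg1, helper_tmp);
        tcg_temp_free_i32(helper_tmp);
    }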
tcg_gen_movi_tl(t0, rt | ((almask << 3) & 0x20)); \
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, llreg)); \
tcg_gen_st_tl(arg1, cpu_env, offsetof(CPUMIPSState, llnewval)); \
- gen_helper_0i(raise_exception, EXCP_SC); \
+ gen_helper_0e0i(raise_exception, EXCP_SC); \
gen_set_label(l2); \
tcg_gen_movi_tl(t0, 0); \
gen_store_gpr(t0, rt); \
static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
{ \
TCGv t0 = tcg_temp_new(); \
- gen_helper_3i(insn, t0, arg1, arg2, ctx->mem_idx); \
+ gen_helper_1e2i(insn, t0, arg1, arg2, ctx->mem_idx); \
gen_store_gpr(t0, rt); \
tcg_temp_free(t0); \
}
case OPC_LDL:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
- gen_helper_3i(ldl, t1, t1, t0, ctx->mem_idx);
+ gen_helper_1e2i(ldl, t1, t1, t0, ctx->mem_idx);
gen_store_gpr(t1, rt);
opn = "ldl";
break;
case OPC_LDR:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
- gen_helper_3i(ldr, t1, t1, t0, ctx->mem_idx);
+ gen_helper_1e2i(ldr, t1, t1, t0, ctx->mem_idx);
gen_store_gpr(t1, rt);
opn = "ldr";
break;
case OPC_LWL:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
- gen_helper_3i(lwl, t1, t1, t0, ctx->mem_idx);
+ gen_helper_1e2i(lwl, t1, t1, t0, ctx->mem_idx);
gen_store_gpr(t1, rt);
opn = "lwl";
break;
case OPC_LWR:
save_cpu_state(ctx, 1);
gen_load_gpr(t1, rt);
- gen_helper_3i(lwr, t1, t1, t0, ctx->mem_idx);
+ gen_helper_1e2i(lwr, t1, t1, t0, ctx->mem_idx);
gen_store_gpr(t1, rt);
opn = "lwr";
break;
break;
case OPC_SDL:
save_cpu_state(ctx, 1);
- gen_helper_2i(sdl, t1, t0, ctx->mem_idx);
+ gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx);
opn = "sdl";
break;
case OPC_SDR:
save_cpu_state(ctx, 1);
- gen_helper_2i(sdr, t1, t0, ctx->mem_idx);
+ gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx);
opn = "sdr";
break;
#endif
break;
case OPC_SWL:
save_cpu_state(ctx, 1);
- gen_helper_2i(swl, t1, t0, ctx->mem_idx);
+ gen_helper_0e2i(swl, t1, t0, ctx->mem_idx);
opn = "swl";
break;
case OPC_SWR:
save_cpu_state(ctx, 1);
- gen_helper_2i(swr, t1, t0, ctx->mem_idx);
+ gen_helper_0e2i(swr, t1, t0, ctx->mem_idx);
opn = "swr";
break;
}
opn = "ddivu";
break;
case OPC_DMULT:
- gen_helper_dmult(t0, t1);
+ gen_helper_dmult(cpu_env, t0, t1);
opn = "dmult";
break;
case OPC_DMULTU:
- gen_helper_dmultu(t0, t1);
+ gen_helper_dmultu(cpu_env, t0, t1);
opn = "dmultu";
break;
#endif
switch (opc) {
case OPC_VR54XX_MULS:
- gen_helper_muls(t0, t0, t1);
+ gen_helper_muls(t0, cpu_env, t0, t1);
opn = "muls";
break;
case OPC_VR54XX_MULSU:
- gen_helper_mulsu(t0, t0, t1);
+ gen_helper_mulsu(t0, cpu_env, t0, t1);
opn = "mulsu";
break;
case OPC_VR54XX_MACC:
- gen_helper_macc(t0, t0, t1);
+ gen_helper_macc(t0, cpu_env, t0, t1);
opn = "macc";
break;
case OPC_VR54XX_MACCU:
- gen_helper_maccu(t0, t0, t1);
+ gen_helper_maccu(t0, cpu_env, t0, t1);
opn = "maccu";
break;
case OPC_VR54XX_MSAC:
- gen_helper_msac(t0, t0, t1);
+ gen_helper_msac(t0, cpu_env, t0, t1);
opn = "msac";
break;
case OPC_VR54XX_MSACU:
- gen_helper_msacu(t0, t0, t1);
+ gen_helper_msacu(t0, cpu_env, t0, t1);
opn = "msacu";
break;
case OPC_VR54XX_MULHI:
- gen_helper_mulhi(t0, t0, t1);
+ gen_helper_mulhi(t0, cpu_env, t0, t1);
opn = "mulhi";
break;
case OPC_VR54XX_MULHIU:
- gen_helper_mulhiu(t0, t0, t1);
+ gen_helper_mulhiu(t0, cpu_env, t0, t1);
opn = "mulhiu";
break;
case OPC_VR54XX_MULSHI:
- gen_helper_mulshi(t0, t0, t1);
+ gen_helper_mulshi(t0, cpu_env, t0, t1);
opn = "mulshi";
break;
case OPC_VR54XX_MULSHIU:
- gen_helper_mulshiu(t0, t0, t1);
+ gen_helper_mulshiu(t0, cpu_env, t0, t1);
opn = "mulshiu";
break;
case OPC_VR54XX_MACCHI:
- gen_helper_macchi(t0, t0, t1);
+ gen_helper_macchi(t0, cpu_env, t0, t1);
opn = "macchi";
break;
case OPC_VR54XX_MACCHIU:
- gen_helper_macchiu(t0, t0, t1);
+ gen_helper_macchiu(t0, cpu_env, t0, t1);
opn = "macchiu";
break;
case OPC_VR54XX_MSACHI:
- gen_helper_msachi(t0, t0, t1);
+ gen_helper_msachi(t0, cpu_env, t0, t1);
opn = "msachi";
break;
case OPC_VR54XX_MSACHIU:
- gen_helper_msachiu(t0, t0, t1);
+ gen_helper_msachiu(t0, cpu_env, t0, t1);
opn = "msachiu";
break;
default:
gen_save_pc(dest);
if (ctx->singlestep_enabled) {
save_cpu_state(ctx, 0);
- gen_helper_0i(raise_exception, EXCP_DEBUG);
+ gen_helper_0e0i(raise_exception, EXCP_DEBUG);
}
tcg_gen_exit_tb(0);
}
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpcontrol(arg);
+ gen_helper_mfc0_mvpcontrol(arg, cpu_env);
rn = "MVPControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf0(arg);
+ gen_helper_mfc0_mvpconf0(arg, cpu_env);
rn = "MVPConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf1(arg);
+ gen_helper_mfc0_mvpconf1(arg, cpu_env);
rn = "MVPConf1";
break;
default:
case 1:
switch (sel) {
case 0:
- gen_helper_mfc0_random(arg);
+ gen_helper_mfc0_random(arg, cpu_env);
rn = "Random";
break;
case 1:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcstatus(arg);
+ gen_helper_mfc0_tcstatus(arg, cpu_env);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcbind(arg);
+ gen_helper_mfc0_tcbind(arg, cpu_env);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcrestart(arg);
+ gen_helper_mfc0_tcrestart(arg, cpu_env);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tchalt(arg);
+ gen_helper_mfc0_tchalt(arg, cpu_env);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tccontext(arg);
+ gen_helper_mfc0_tccontext(arg, cpu_env);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcschedule(arg);
+ gen_helper_mfc0_tcschedule(arg, cpu_env);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcschefback(arg);
+ gen_helper_mfc0_tcschefback(arg, cpu_env);
rn = "TCScheFBack";
break;
default:
/* Mark as an IO operation because we read the time. */
if (use_icount)
gen_io_start();
- gen_helper_mfc0_count(arg);
+ gen_helper_mfc0_count(arg, cpu_env);
if (use_icount) {
gen_io_end();
}
case 17:
switch (sel) {
case 0:
- gen_helper_mfc0_lladdr(arg);
+ gen_helper_mfc0_lladdr(arg, cpu_env);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mfc0_watchlo, arg, sel);
+ gen_helper_1e0i(mfc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ...7:
- gen_helper_1i(mfc0_watchhi, arg, sel);
+ gen_helper_1e0i(mfc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
case 23:
switch (sel) {
case 0:
- gen_helper_mfc0_debug(arg); /* EJTAG support */
+ gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
rn = "Debug";
break;
case 1:
case 0:
switch (sel) {
case 0:
- gen_helper_mtc0_index(arg);
+ gen_helper_mtc0_index(cpu_env, arg);
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_mvpcontrol(arg);
+ gen_helper_mtc0_mvpcontrol(cpu_env, arg);
rn = "MVPControl";
break;
case 2:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpecontrol(arg);
+ gen_helper_mtc0_vpecontrol(cpu_env, arg);
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf0(arg);
+ gen_helper_mtc0_vpeconf0(cpu_env, arg);
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf1(arg);
+ gen_helper_mtc0_vpeconf1(cpu_env, arg);
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_yqmask(arg);
+ gen_helper_mtc0_yqmask(cpu_env, arg);
rn = "YQMask";
break;
case 5:
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeopt(arg);
+ gen_helper_mtc0_vpeopt(cpu_env, arg);
rn = "VPEOpt";
break;
default:
case 2:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo0(arg);
+ gen_helper_mtc0_entrylo0(cpu_env, arg);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcstatus(arg);
+ gen_helper_mtc0_tcstatus(cpu_env, arg);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcbind(arg);
+ gen_helper_mtc0_tcbind(cpu_env, arg);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcrestart(arg);
+ gen_helper_mtc0_tcrestart(cpu_env, arg);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tchalt(arg);
+ gen_helper_mtc0_tchalt(cpu_env, arg);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tccontext(arg);
+ gen_helper_mtc0_tccontext(cpu_env, arg);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschedule(arg);
+ gen_helper_mtc0_tcschedule(cpu_env, arg);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschefback(arg);
+ gen_helper_mtc0_tcschefback(cpu_env, arg);
rn = "TCScheFBack";
break;
default:
case 3:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo1(arg);
+ gen_helper_mtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
default:
case 4:
switch (sel) {
case 0:
- gen_helper_mtc0_context(arg);
+ gen_helper_mtc0_context(cpu_env, arg);
rn = "Context";
break;
case 1:
-// gen_helper_mtc0_contextconfig(arg); /* SmartMIPS ASE */
+// gen_helper_mtc0_contextconfig(cpu_env, arg); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
case 5:
switch (sel) {
case 0:
- gen_helper_mtc0_pagemask(arg);
+ gen_helper_mtc0_pagemask(cpu_env, arg);
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_pagegrain(arg);
+ gen_helper_mtc0_pagegrain(cpu_env, arg);
rn = "PageGrain";
break;
default:
case 6:
switch (sel) {
case 0:
- gen_helper_mtc0_wired(arg);
+ gen_helper_mtc0_wired(cpu_env, arg);
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf0(arg);
+ gen_helper_mtc0_srsconf0(cpu_env, arg);
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf1(arg);
+ gen_helper_mtc0_srsconf1(cpu_env, arg);
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf2(arg);
+ gen_helper_mtc0_srsconf2(cpu_env, arg);
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf3(arg);
+ gen_helper_mtc0_srsconf3(cpu_env, arg);
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf4(arg);
+ gen_helper_mtc0_srsconf4(cpu_env, arg);
rn = "SRSConf4";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_hwrena(arg);
+ gen_helper_mtc0_hwrena(cpu_env, arg);
rn = "HWREna";
break;
default:
case 9:
switch (sel) {
case 0:
- gen_helper_mtc0_count(arg);
+ gen_helper_mtc0_count(cpu_env, arg);
rn = "Count";
break;
/* 6,7 are implementation dependent */
case 10:
switch (sel) {
case 0:
- gen_helper_mtc0_entryhi(arg);
+ gen_helper_mtc0_entryhi(cpu_env, arg);
rn = "EntryHi";
break;
default:
case 11:
switch (sel) {
case 0:
- gen_helper_mtc0_compare(arg);
+ gen_helper_mtc0_compare(cpu_env, arg);
rn = "Compare";
break;
/* 6,7 are implementation dependent */
switch (sel) {
case 0:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_status(arg);
+ gen_helper_mtc0_status(cpu_env, arg);
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_intctl(arg);
+ gen_helper_mtc0_intctl(cpu_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsctl(arg);
+ gen_helper_mtc0_srsctl(cpu_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSCtl";
switch (sel) {
case 0:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_cause(arg);
+ gen_helper_mtc0_cause(cpu_env, arg);
rn = "Cause";
break;
default:
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_ebase(arg);
+ gen_helper_mtc0_ebase(cpu_env, arg);
rn = "EBase";
break;
default:
case 16:
switch (sel) {
case 0:
- gen_helper_mtc0_config0(arg);
+ gen_helper_mtc0_config0(cpu_env, arg);
rn = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "Config1";
break;
case 2:
- gen_helper_mtc0_config2(arg);
+ gen_helper_mtc0_config2(cpu_env, arg);
rn = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case 17:
switch (sel) {
case 0:
- gen_helper_mtc0_lladdr(arg);
+ gen_helper_mtc0_lladdr(cpu_env, arg);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchlo, arg, sel);
+ gen_helper_0e1i(mtc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchhi, arg, sel);
+ gen_helper_0e1i(mtc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
case 0:
#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
- gen_helper_mtc0_xcontext(arg);
+ gen_helper_mtc0_xcontext(cpu_env, arg);
rn = "XContext";
break;
#endif
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_helper_mtc0_framemask(arg);
+ gen_helper_mtc0_framemask(cpu_env, arg);
rn = "Framemask";
break;
default:
case 23:
switch (sel) {
case 0:
- gen_helper_mtc0_debug(arg); /* EJTAG support */
+ gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
rn = "Debug";
break;
case 1:
-// gen_helper_mtc0_tracecontrol(arg); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol(cpu_env, arg); /* PDtrace support */
rn = "TraceControl";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
// break;
case 2:
-// gen_helper_mtc0_tracecontrol2(arg); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol2(cpu_env, arg); /* PDtrace support */
rn = "TraceControl2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case 3:
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
-// gen_helper_mtc0_usertracedata(arg); /* PDtrace support */
+// gen_helper_mtc0_usertracedata(cpu_env, arg); /* PDtrace support */
rn = "UserTraceData";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
// break;
case 4:
-// gen_helper_mtc0_tracebpc(arg); /* PDtrace support */
+// gen_helper_mtc0_tracebpc(cpu_env, arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceBPC";
case 25:
switch (sel) {
case 0:
- gen_helper_mtc0_performance0(arg);
+ gen_helper_mtc0_performance0(cpu_env, arg);
rn = "Performance0";
break;
case 1:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taglo(arg);
+ gen_helper_mtc0_taglo(cpu_env, arg);
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datalo(arg);
+ gen_helper_mtc0_datalo(cpu_env, arg);
rn = "DataLo";
break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taghi(arg);
+ gen_helper_mtc0_taghi(cpu_env, arg);
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datahi(arg);
+ gen_helper_mtc0_datahi(cpu_env, arg);
rn = "DataHi";
break;
default:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpcontrol(arg);
+ gen_helper_mfc0_mvpcontrol(arg, cpu_env);
rn = "MVPControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf0(arg);
+ gen_helper_mfc0_mvpconf0(arg, cpu_env);
rn = "MVPConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf1(arg);
+ gen_helper_mfc0_mvpconf1(arg, cpu_env);
rn = "MVPConf1";
break;
default:
case 1:
switch (sel) {
case 0:
- gen_helper_mfc0_random(arg);
+ gen_helper_mfc0_random(arg, cpu_env);
rn = "Random";
break;
case 1:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcstatus(arg);
+ gen_helper_mfc0_tcstatus(arg, cpu_env);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcbind(arg);
+ gen_helper_mfc0_tcbind(arg, cpu_env);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tcrestart(arg);
+ gen_helper_dmfc0_tcrestart(arg, cpu_env);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tchalt(arg);
+ gen_helper_dmfc0_tchalt(arg, cpu_env);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tccontext(arg);
+ gen_helper_dmfc0_tccontext(arg, cpu_env);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tcschedule(arg);
+ gen_helper_dmfc0_tcschedule(arg, cpu_env);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tcschefback(arg);
+ gen_helper_dmfc0_tcschefback(arg, cpu_env);
rn = "TCScheFBack";
break;
default:
/* Mark as an IO operation because we read the time. */
if (use_icount)
gen_io_start();
- gen_helper_mfc0_count(arg);
+ gen_helper_mfc0_count(arg, cpu_env);
if (use_icount) {
gen_io_end();
}
case 17:
switch (sel) {
case 0:
- gen_helper_dmfc0_lladdr(arg);
+ gen_helper_dmfc0_lladdr(arg, cpu_env);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(dmfc0_watchlo, arg, sel);
+ gen_helper_1e0i(dmfc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mfc0_watchhi, arg, sel);
+ gen_helper_1e0i(mfc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
case 23:
switch (sel) {
case 0:
- gen_helper_mfc0_debug(arg); /* EJTAG support */
+ gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
rn = "Debug";
break;
case 1:
-// gen_helper_dmfc0_tracecontrol(arg); /* PDtrace support */
+// gen_helper_dmfc0_tracecontrol(arg, cpu_env); /* PDtrace support */
rn = "TraceControl";
// break;
case 2:
-// gen_helper_dmfc0_tracecontrol2(arg); /* PDtrace support */
+// gen_helper_dmfc0_tracecontrol2(arg, cpu_env); /* PDtrace support */
rn = "TraceControl2";
// break;
case 3:
-// gen_helper_dmfc0_usertracedata(arg); /* PDtrace support */
+// gen_helper_dmfc0_usertracedata(arg, cpu_env); /* PDtrace support */
rn = "UserTraceData";
// break;
case 4:
-// gen_helper_dmfc0_tracebpc(arg); /* PDtrace support */
+// gen_helper_dmfc0_tracebpc(arg, cpu_env); /* PDtrace support */
rn = "TraceBPC";
// break;
default:
case 0:
switch (sel) {
case 0:
- gen_helper_mtc0_index(arg);
+ gen_helper_mtc0_index(cpu_env, arg);
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_mvpcontrol(arg);
+ gen_helper_mtc0_mvpcontrol(cpu_env, arg);
rn = "MVPControl";
break;
case 2:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpecontrol(arg);
+ gen_helper_mtc0_vpecontrol(cpu_env, arg);
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf0(arg);
+ gen_helper_mtc0_vpeconf0(cpu_env, arg);
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf1(arg);
+ gen_helper_mtc0_vpeconf1(cpu_env, arg);
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_yqmask(arg);
+ gen_helper_mtc0_yqmask(cpu_env, arg);
rn = "YQMask";
break;
case 5:
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeopt(arg);
+ gen_helper_mtc0_vpeopt(cpu_env, arg);
rn = "VPEOpt";
break;
default:
case 2:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo0(arg);
+ gen_helper_mtc0_entrylo0(cpu_env, arg);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcstatus(arg);
+ gen_helper_mtc0_tcstatus(cpu_env, arg);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcbind(arg);
+ gen_helper_mtc0_tcbind(cpu_env, arg);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcrestart(arg);
+ gen_helper_mtc0_tcrestart(cpu_env, arg);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tchalt(arg);
+ gen_helper_mtc0_tchalt(cpu_env, arg);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tccontext(arg);
+ gen_helper_mtc0_tccontext(cpu_env, arg);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschedule(arg);
+ gen_helper_mtc0_tcschedule(cpu_env, arg);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschefback(arg);
+ gen_helper_mtc0_tcschefback(cpu_env, arg);
rn = "TCScheFBack";
break;
default:
case 3:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo1(arg);
+ gen_helper_mtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
default:
case 4:
switch (sel) {
case 0:
- gen_helper_mtc0_context(arg);
+ gen_helper_mtc0_context(cpu_env, arg);
rn = "Context";
break;
case 1:
-// gen_helper_mtc0_contextconfig(arg); /* SmartMIPS ASE */
+// gen_helper_mtc0_contextconfig(cpu_env, arg); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
case 5:
switch (sel) {
case 0:
- gen_helper_mtc0_pagemask(arg);
+ gen_helper_mtc0_pagemask(cpu_env, arg);
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_pagegrain(arg);
+ gen_helper_mtc0_pagegrain(cpu_env, arg);
rn = "PageGrain";
break;
default:
case 6:
switch (sel) {
case 0:
- gen_helper_mtc0_wired(arg);
+ gen_helper_mtc0_wired(cpu_env, arg);
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf0(arg);
+ gen_helper_mtc0_srsconf0(cpu_env, arg);
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf1(arg);
+ gen_helper_mtc0_srsconf1(cpu_env, arg);
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf2(arg);
+ gen_helper_mtc0_srsconf2(cpu_env, arg);
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf3(arg);
+ gen_helper_mtc0_srsconf3(cpu_env, arg);
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf4(arg);
+ gen_helper_mtc0_srsconf4(cpu_env, arg);
rn = "SRSConf4";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_hwrena(arg);
+ gen_helper_mtc0_hwrena(cpu_env, arg);
rn = "HWREna";
break;
default:
case 9:
switch (sel) {
case 0:
- gen_helper_mtc0_count(arg);
+ gen_helper_mtc0_count(cpu_env, arg);
rn = "Count";
break;
/* 6,7 are implementation dependent */
case 10:
switch (sel) {
case 0:
- gen_helper_mtc0_entryhi(arg);
+ gen_helper_mtc0_entryhi(cpu_env, arg);
rn = "EntryHi";
break;
default:
case 11:
switch (sel) {
case 0:
- gen_helper_mtc0_compare(arg);
+ gen_helper_mtc0_compare(cpu_env, arg);
rn = "Compare";
break;
/* 6,7 are implementation dependent */
switch (sel) {
case 0:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_status(arg);
+ gen_helper_mtc0_status(cpu_env, arg);
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_intctl(arg);
+ gen_helper_mtc0_intctl(cpu_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsctl(arg);
+ gen_helper_mtc0_srsctl(cpu_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSCtl";
if (use_icount) {
gen_io_start();
}
- gen_helper_mtc0_cause(arg);
+ gen_helper_mtc0_cause(cpu_env, arg);
if (use_icount) {
gen_io_end();
}
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_ebase(arg);
+ gen_helper_mtc0_ebase(cpu_env, arg);
rn = "EBase";
break;
default:
case 16:
switch (sel) {
case 0:
- gen_helper_mtc0_config0(arg);
+ gen_helper_mtc0_config0(cpu_env, arg);
rn = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "Config1";
break;
case 2:
- gen_helper_mtc0_config2(arg);
+ gen_helper_mtc0_config2(cpu_env, arg);
rn = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case 17:
switch (sel) {
case 0:
- gen_helper_mtc0_lladdr(arg);
+ gen_helper_mtc0_lladdr(cpu_env, arg);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchlo, arg, sel);
+ gen_helper_0e1i(mtc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchhi, arg, sel);
+ gen_helper_0e1i(mtc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS3);
- gen_helper_mtc0_xcontext(arg);
+ gen_helper_mtc0_xcontext(cpu_env, arg);
rn = "XContext";
break;
default:
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_helper_mtc0_framemask(arg);
+ gen_helper_mtc0_framemask(cpu_env, arg);
rn = "Framemask";
break;
default:
case 23:
switch (sel) {
case 0:
- gen_helper_mtc0_debug(arg); /* EJTAG support */
+ gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
rn = "Debug";
break;
case 1:
-// gen_helper_mtc0_tracecontrol(arg); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol(cpu_env, arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceControl";
// break;
case 2:
-// gen_helper_mtc0_tracecontrol2(arg); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol2(cpu_env, arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceControl2";
// break;
case 3:
-// gen_helper_mtc0_usertracedata(arg); /* PDtrace support */
+// gen_helper_mtc0_usertracedata(cpu_env, arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "UserTraceData";
// break;
case 4:
-// gen_helper_mtc0_tracebpc(arg); /* PDtrace support */
+// gen_helper_mtc0_tracebpc(cpu_env, arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceBPC";
case 25:
switch (sel) {
case 0:
- gen_helper_mtc0_performance0(arg);
+ gen_helper_mtc0_performance0(cpu_env, arg);
rn = "Performance0";
break;
case 1:
-// gen_helper_mtc0_performance1(arg);
+// gen_helper_mtc0_performance1(cpu_env, arg);
rn = "Performance1";
// break;
case 2:
-// gen_helper_mtc0_performance2(arg);
+// gen_helper_mtc0_performance2(cpu_env, arg);
rn = "Performance2";
// break;
case 3:
-// gen_helper_mtc0_performance3(arg);
+// gen_helper_mtc0_performance3(cpu_env, arg);
rn = "Performance3";
// break;
case 4:
-// gen_helper_mtc0_performance4(arg);
+// gen_helper_mtc0_performance4(cpu_env, arg);
rn = "Performance4";
// break;
case 5:
-// gen_helper_mtc0_performance5(arg);
+// gen_helper_mtc0_performance5(cpu_env, arg);
rn = "Performance5";
// break;
case 6:
-// gen_helper_mtc0_performance6(arg);
+// gen_helper_mtc0_performance6(cpu_env, arg);
rn = "Performance6";
// break;
case 7:
-// gen_helper_mtc0_performance7(arg);
+// gen_helper_mtc0_performance7(cpu_env, arg);
rn = "Performance7";
// break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taglo(arg);
+ gen_helper_mtc0_taglo(cpu_env, arg);
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datalo(arg);
+ gen_helper_mtc0_datalo(cpu_env, arg);
rn = "DataLo";
break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taghi(arg);
+ gen_helper_mtc0_taghi(cpu_env, arg);
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datahi(arg);
+ gen_helper_mtc0_datahi(cpu_env, arg);
rn = "DataHi";
break;
default:
case 1:
switch (sel) {
case 1:
- gen_helper_mftc0_vpecontrol(t0);
+ gen_helper_mftc0_vpecontrol(t0, cpu_env);
break;
case 2:
- gen_helper_mftc0_vpeconf0(t0);
+ gen_helper_mftc0_vpeconf0(t0, cpu_env);
break;
default:
goto die;
case 2:
switch (sel) {
case 1:
- gen_helper_mftc0_tcstatus(t0);
+ gen_helper_mftc0_tcstatus(t0, cpu_env);
break;
case 2:
- gen_helper_mftc0_tcbind(t0);
+ gen_helper_mftc0_tcbind(t0, cpu_env);
break;
case 3:
- gen_helper_mftc0_tcrestart(t0);
+ gen_helper_mftc0_tcrestart(t0, cpu_env);
break;
case 4:
- gen_helper_mftc0_tchalt(t0);
+ gen_helper_mftc0_tchalt(t0, cpu_env);
break;
case 5:
- gen_helper_mftc0_tccontext(t0);
+ gen_helper_mftc0_tccontext(t0, cpu_env);
break;
case 6:
- gen_helper_mftc0_tcschedule(t0);
+ gen_helper_mftc0_tcschedule(t0, cpu_env);
break;
case 7:
- gen_helper_mftc0_tcschefback(t0);
+ gen_helper_mftc0_tcschefback(t0, cpu_env);
break;
default:
gen_mfc0(env, ctx, t0, rt, sel);
case 10:
switch (sel) {
case 0:
- gen_helper_mftc0_entryhi(t0);
+ gen_helper_mftc0_entryhi(t0, cpu_env);
break;
default:
gen_mfc0(env, ctx, t0, rt, sel);
case 12:
switch (sel) {
case 0:
- gen_helper_mftc0_status(t0);
+ gen_helper_mftc0_status(t0, cpu_env);
break;
default:
gen_mfc0(env, ctx, t0, rt, sel);
case 13:
switch (sel) {
case 0:
- gen_helper_mftc0_cause(t0);
+ gen_helper_mftc0_cause(t0, cpu_env);
break;
default:
goto die;
case 14:
switch (sel) {
case 0:
- gen_helper_mftc0_epc(t0);
+ gen_helper_mftc0_epc(t0, cpu_env);
break;
default:
goto die;
case 15:
switch (sel) {
case 1:
- gen_helper_mftc0_ebase(t0);
+ gen_helper_mftc0_ebase(t0, cpu_env);
break;
default:
goto die;
case 16:
switch (sel) {
case 0 ... 7:
- gen_helper_mftc0_configx(t0, tcg_const_tl(sel));
+ gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel));
break;
default:
goto die;
case 23:
switch (sel) {
case 0:
- gen_helper_mftc0_debug(t0);
+ gen_helper_mftc0_debug(t0, cpu_env);
break;
default:
gen_mfc0(env, ctx, t0, rt, sel);
} else switch (sel) {
/* GPR registers. */
case 0:
- gen_helper_1i(mftgpr, t0, rt);
+ gen_helper_1e0i(mftgpr, t0, rt);
break;
/* Auxiliary CPU registers */
case 1:
switch (rt) {
case 0:
- gen_helper_1i(mftlo, t0, 0);
+ gen_helper_1e0i(mftlo, t0, 0);
break;
case 1:
- gen_helper_1i(mfthi, t0, 0);
+ gen_helper_1e0i(mfthi, t0, 0);
break;
case 2:
- gen_helper_1i(mftacx, t0, 0);
+ gen_helper_1e0i(mftacx, t0, 0);
break;
case 4:
- gen_helper_1i(mftlo, t0, 1);
+ gen_helper_1e0i(mftlo, t0, 1);
break;
case 5:
- gen_helper_1i(mfthi, t0, 1);
+ gen_helper_1e0i(mfthi, t0, 1);
break;
case 6:
- gen_helper_1i(mftacx, t0, 1);
+ gen_helper_1e0i(mftacx, t0, 1);
break;
case 8:
- gen_helper_1i(mftlo, t0, 2);
+ gen_helper_1e0i(mftlo, t0, 2);
break;
case 9:
- gen_helper_1i(mfthi, t0, 2);
+ gen_helper_1e0i(mfthi, t0, 2);
break;
case 10:
- gen_helper_1i(mftacx, t0, 2);
+ gen_helper_1e0i(mftacx, t0, 2);
break;
case 12:
- gen_helper_1i(mftlo, t0, 3);
+ gen_helper_1e0i(mftlo, t0, 3);
break;
case 13:
- gen_helper_1i(mfthi, t0, 3);
+ gen_helper_1e0i(mfthi, t0, 3);
break;
case 14:
- gen_helper_1i(mftacx, t0, 3);
+ gen_helper_1e0i(mftacx, t0, 3);
break;
case 16:
- gen_helper_mftdsp(t0);
+ gen_helper_mftdsp(t0, cpu_env);
break;
default:
goto die;
break;
case 3:
/* XXX: For now we support only a single FPU context. */
- gen_helper_1i(cfc1, t0, rt);
+ gen_helper_1e0i(cfc1, t0, rt);
break;
/* COP2: Not implemented. */
case 4:
case 1:
switch (sel) {
case 1:
- gen_helper_mttc0_vpecontrol(t0);
+ gen_helper_mttc0_vpecontrol(cpu_env, t0);
break;
case 2:
- gen_helper_mttc0_vpeconf0(t0);
+ gen_helper_mttc0_vpeconf0(cpu_env, t0);
break;
default:
goto die;
case 2:
switch (sel) {
case 1:
- gen_helper_mttc0_tcstatus(t0);
+ gen_helper_mttc0_tcstatus(cpu_env, t0);
break;
case 2:
- gen_helper_mttc0_tcbind(t0);
+ gen_helper_mttc0_tcbind(cpu_env, t0);
break;
case 3:
- gen_helper_mttc0_tcrestart(t0);
+ gen_helper_mttc0_tcrestart(cpu_env, t0);
break;
case 4:
- gen_helper_mttc0_tchalt(t0);
+ gen_helper_mttc0_tchalt(cpu_env, t0);
break;
case 5:
- gen_helper_mttc0_tccontext(t0);
+ gen_helper_mttc0_tccontext(cpu_env, t0);
break;
case 6:
- gen_helper_mttc0_tcschedule(t0);
+ gen_helper_mttc0_tcschedule(cpu_env, t0);
break;
case 7:
- gen_helper_mttc0_tcschefback(t0);
+ gen_helper_mttc0_tcschefback(cpu_env, t0);
break;
default:
gen_mtc0(env, ctx, t0, rd, sel);
case 10:
switch (sel) {
case 0:
- gen_helper_mttc0_entryhi(t0);
+ gen_helper_mttc0_entryhi(cpu_env, t0);
break;
default:
gen_mtc0(env, ctx, t0, rd, sel);
case 12:
switch (sel) {
case 0:
- gen_helper_mttc0_status(t0);
+ gen_helper_mttc0_status(cpu_env, t0);
break;
default:
gen_mtc0(env, ctx, t0, rd, sel);
case 13:
switch (sel) {
case 0:
- gen_helper_mttc0_cause(t0);
+ gen_helper_mttc0_cause(cpu_env, t0);
break;
default:
goto die;
case 15:
switch (sel) {
case 1:
- gen_helper_mttc0_ebase(t0);
+ gen_helper_mttc0_ebase(cpu_env, t0);
break;
default:
goto die;
case 23:
switch (sel) {
case 0:
- gen_helper_mttc0_debug(t0);
+ gen_helper_mttc0_debug(cpu_env, t0);
break;
default:
gen_mtc0(env, ctx, t0, rd, sel);
} else switch (sel) {
/* GPR registers. */
case 0:
- gen_helper_1i(mttgpr, t0, rd);
+ gen_helper_0e1i(mttgpr, t0, rd);
break;
/* Auxiliary CPU registers */
case 1:
switch (rd) {
case 0:
- gen_helper_1i(mttlo, t0, 0);
+ gen_helper_0e1i(mttlo, t0, 0);
break;
case 1:
- gen_helper_1i(mtthi, t0, 0);
+ gen_helper_0e1i(mtthi, t0, 0);
break;
case 2:
- gen_helper_1i(mttacx, t0, 0);
+ gen_helper_0e1i(mttacx, t0, 0);
break;
case 4:
- gen_helper_1i(mttlo, t0, 1);
+ gen_helper_0e1i(mttlo, t0, 1);
break;
case 5:
- gen_helper_1i(mtthi, t0, 1);
+ gen_helper_0e1i(mtthi, t0, 1);
break;
case 6:
- gen_helper_1i(mttacx, t0, 1);
+ gen_helper_0e1i(mttacx, t0, 1);
break;
case 8:
- gen_helper_1i(mttlo, t0, 2);
+ gen_helper_0e1i(mttlo, t0, 2);
break;
case 9:
- gen_helper_1i(mtthi, t0, 2);
+ gen_helper_0e1i(mtthi, t0, 2);
break;
case 10:
- gen_helper_1i(mttacx, t0, 2);
+ gen_helper_0e1i(mttacx, t0, 2);
break;
case 12:
- gen_helper_1i(mttlo, t0, 3);
+ gen_helper_0e1i(mttlo, t0, 3);
break;
case 13:
- gen_helper_1i(mtthi, t0, 3);
+ gen_helper_0e1i(mtthi, t0, 3);
break;
case 14:
- gen_helper_1i(mttacx, t0, 3);
+ gen_helper_0e1i(mttacx, t0, 3);
break;
case 16:
- gen_helper_mttdsp(t0);
+ gen_helper_mttdsp(cpu_env, t0);
break;
default:
goto die;
break;
case 3:
/* XXX: For now we support only a single FPU context. */
- gen_helper_1i(ctc1, t0, rd);
+ gen_helper_0e1i(ctc1, t0, rd);
break;
/* COP2: Not implemented. */
case 4:
opn = "tlbwi";
if (!env->tlb->helper_tlbwi)
goto die;
- gen_helper_tlbwi();
+ gen_helper_tlbwi(cpu_env);
break;
case OPC_TLBWR:
opn = "tlbwr";
if (!env->tlb->helper_tlbwr)
goto die;
- gen_helper_tlbwr();
+ gen_helper_tlbwr(cpu_env);
break;
case OPC_TLBP:
opn = "tlbp";
if (!env->tlb->helper_tlbp)
goto die;
- gen_helper_tlbp();
+ gen_helper_tlbp(cpu_env);
break;
case OPC_TLBR:
opn = "tlbr";
if (!env->tlb->helper_tlbr)
goto die;
- gen_helper_tlbr();
+ gen_helper_tlbr(cpu_env);
break;
case OPC_ERET:
opn = "eret";
check_insn(env, ctx, ISA_MIPS2);
- gen_helper_eret();
+ gen_helper_eret(cpu_env);
ctx->bstate = BS_EXCP;
break;
case OPC_DERET:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
} else {
- gen_helper_deret();
+ gen_helper_deret(cpu_env);
ctx->bstate = BS_EXCP;
}
break;
ctx->pc += 4;
save_cpu_state(ctx, 1);
ctx->pc -= 4;
- gen_helper_wait();
+ gen_helper_wait(cpu_env);
ctx->bstate = BS_EXCP;
break;
default:
opn = "mtc1";
break;
case OPC_CFC1:
- gen_helper_1i(cfc1, t0, fs);
+ gen_helper_1e0i(cfc1, t0, fs);
gen_store_gpr(t0, rt);
opn = "cfc1";
break;
case OPC_CTC1:
gen_load_gpr(t0, rt);
- gen_helper_1i(ctc1, t0, fs);
+ gen_helper_0e1i(ctc1, t0, fs);
opn = "ctc1";
break;
#if defined(TARGET_MIPS64)
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_add_s(fp0, fp0, fp1);
+ gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_sub_s(fp0, fp0, fp1);
+ gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_mul_s(fp0, fp0, fp1);
+ gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_div_s(fp0, fp0, fp1);
+ gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_sqrt_s(fp0, fp0);
+ gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_roundl_s(fp64, fp32);
+ gen_helper_float_roundl_s(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_truncl_s(fp64, fp32);
+ gen_helper_float_truncl_s(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_ceill_s(fp64, fp32);
+ gen_helper_float_ceill_s(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_floorl_s(fp64, fp32);
+ gen_helper_float_floorl_s(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_roundw_s(fp0, fp0);
+ gen_helper_float_roundw_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_truncw_s(fp0, fp0);
+ gen_helper_float_truncw_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_ceilw_s(fp0, fp0);
+ gen_helper_float_ceilw_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_floorw_s(fp0, fp0);
+ gen_helper_float_floorw_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_recip_s(fp0, fp0);
+ gen_helper_float_recip_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_rsqrt_s(fp0, fp0);
+ gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_recip2_s(fp0, fp0, fp1);
+ gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_recip1_s(fp0, fp0);
+ gen_helper_float_recip1_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_rsqrt1_s(fp0, fp0);
+ gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_rsqrt2_s(fp0, fp0, fp1);
+ gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_cvtd_s(fp64, fp32);
+ gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_cvtw_s(fp0, fp0);
+ gen_helper_float_cvtw_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_cvtl_s(fp64, fp32);
+ gen_helper_float_cvtl_s(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_add_d(fp0, fp0, fp1);
+ gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_sub_d(fp0, fp0, fp1);
+ gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_mul_d(fp0, fp0, fp1);
+ gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_div_d(fp0, fp0, fp1);
+ gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_sqrt_d(fp0, fp0);
+ gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_roundl_d(fp0, fp0);
+ gen_helper_float_roundl_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_truncl_d(fp0, fp0);
+ gen_helper_float_truncl_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_ceill_d(fp0, fp0);
+ gen_helper_float_ceill_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_floorl_d(fp0, fp0);
+ gen_helper_float_floorl_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_roundw_d(fp32, fp64);
+ gen_helper_float_roundw_d(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_truncw_d(fp32, fp64);
+ gen_helper_float_truncw_d(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_ceilw_d(fp32, fp64);
+ gen_helper_float_ceilw_d(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_floorw_d(fp32, fp64);
+ gen_helper_float_floorw_d(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip_d(fp0, fp0);
+ gen_helper_float_recip_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt_d(fp0, fp0);
+ gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_recip2_d(fp0, fp0, fp1);
+ gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip1_d(fp0, fp0);
+ gen_helper_float_recip1_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt1_d(fp0, fp0);
+ gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_rsqrt2_d(fp0, fp0, fp1);
+ gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_cvts_d(fp32, fp64);
+ gen_helper_float_cvts_d(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_cvtw_d(fp32, fp64);
+ gen_helper_float_cvtw_d(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtl_d(fp0, fp0);
+ gen_helper_float_cvtl_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_cvts_w(fp0, fp0);
+ gen_helper_float_cvts_w(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(fp32, fs);
- gen_helper_float_cvtd_w(fp64, fp32);
+ gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_cvts_l(fp32, fp64);
+ gen_helper_float_cvts_l(fp32, cpu_env, fp64);
tcg_temp_free_i64(fp64);
gen_store_fpr32(fp32, fd);
tcg_temp_free_i32(fp32);
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtd_l(fp0, fp0);
+ gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtps_pw(fp0, fp0);
+ gen_helper_float_cvtps_pw(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_add_ps(fp0, fp0, fp1);
+ gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_sub_ps(fp0, fp0, fp1);
+ gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_mul_ps(fp0, fp0, fp1);
+ gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
- gen_helper_float_addr_ps(fp0, fp0, fp1);
+ gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
- gen_helper_float_mulr_ps(fp0, fp0, fp1);
+ gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_recip2_ps(fp0, fp0, fp1);
+ gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip1_ps(fp0, fp0);
+ gen_helper_float_recip1_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt1_ps(fp0, fp0);
+ gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_rsqrt2_ps(fp0, fp0, fp1);
+ gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32h(fp0, fs);
- gen_helper_float_cvts_pu(fp0, fp0);
+ gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtpw_ps(fp0, fp0);
+ gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_cvts_pl(fp0, fp0);
+ gen_helper_float_cvts_pl(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
gen_load_fpr32(fp2, fr);
- gen_helper_float_muladd_s(fp2, fp0, fp1, fp2);
+ gen_helper_float_muladd_s(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_muladd_d(fp2, fp0, fp1, fp2);
+ gen_helper_float_muladd_d(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_muladd_ps(fp2, fp0, fp1, fp2);
+ gen_helper_float_muladd_ps(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
gen_load_fpr32(fp2, fr);
- gen_helper_float_mulsub_s(fp2, fp0, fp1, fp2);
+ gen_helper_float_mulsub_s(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_mulsub_d(fp2, fp0, fp1, fp2);
+ gen_helper_float_mulsub_d(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_mulsub_ps(fp2, fp0, fp1, fp2);
+ gen_helper_float_mulsub_ps(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
gen_load_fpr32(fp2, fr);
- gen_helper_float_nmuladd_s(fp2, fp0, fp1, fp2);
+ gen_helper_float_nmuladd_s(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmuladd_d(fp2, fp0, fp1, fp2);
+ gen_helper_float_nmuladd_d(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmuladd_ps(fp2, fp0, fp1, fp2);
+ gen_helper_float_nmuladd_ps(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
gen_load_fpr32(fp2, fr);
- gen_helper_float_nmulsub_s(fp2, fp0, fp1, fp2);
+ gen_helper_float_nmulsub_s(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
gen_store_fpr32(fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmulsub_d(fp2, fp0, fp1, fp2);
+ gen_helper_float_nmulsub_d(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmulsub_ps(fp2, fp0, fp1, fp2);
+ gen_helper_float_nmulsub_ps(fp2, cpu_env, fp0, fp1, fp2);
tcg_temp_free_i64(fp0);
tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
switch (rd) {
case 0:
save_cpu_state(ctx, 1);
- gen_helper_rdhwr_cpunum(t0);
+ gen_helper_rdhwr_cpunum(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 1:
save_cpu_state(ctx, 1);
- gen_helper_rdhwr_synci_step(t0);
+ gen_helper_rdhwr_synci_step(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 2:
save_cpu_state(ctx, 1);
- gen_helper_rdhwr_cc(t0);
+ gen_helper_rdhwr_cc(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 3:
save_cpu_state(ctx, 1);
- gen_helper_rdhwr_ccres(t0);
+ gen_helper_rdhwr_ccres(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 29:
}
if (ctx->singlestep_enabled) {
save_cpu_state(ctx, 0);
- gen_helper_0i(raise_exception, EXCP_DEBUG);
+ gen_helper_0e0i(raise_exception, EXCP_DEBUG);
}
tcg_gen_exit_tb(0);
break;
static int decode_extended_mips16_opc (CPUMIPSState *env, DisasContext *ctx,
int *is_branch)
{
- int extend = lduw_code(ctx->pc + 2);
+ int extend = cpu_lduw_code(env, ctx->pc + 2);
int op, rx, ry, funct, sa;
int16_t imm, offset;
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_JAL:
- offset = lduw_code(ctx->pc + 2);
+ offset = cpu_lduw_code(env, ctx->pc + 2);
offset = (((ctx->opcode & 0x1f) << 21)
| ((ctx->opcode >> 5) & 0x1f) << 16
| offset) << 2;
save_cpu_state(ctx, 1);
switch (opc) {
case LWM32:
- gen_helper_lwm(t0, t1, t2);
+ gen_helper_lwm(cpu_env, t0, t1, t2);
break;
case SWM32:
- gen_helper_swm(t0, t1, t2);
+ gen_helper_swm(cpu_env, t0, t1, t2);
break;
#ifdef TARGET_MIPS64
case LDM:
- gen_helper_ldm(t0, t1, t2);
+ gen_helper_ldm(cpu_env, t0, t1, t2);
break;
case SDM:
- gen_helper_sdm(t0, t1, t2);
+ gen_helper_sdm(cpu_env, t0, t1, t2);
break;
#endif
}
TCGv t0 = tcg_temp_new();
save_cpu_state(ctx, 1);
- gen_helper_di(t0);
+ gen_helper_di(t0, cpu_env);
gen_store_gpr(t0, rs);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
TCGv t0 = tcg_temp_new();
save_cpu_state(ctx, 1);
- gen_helper_ei(t0);
+ gen_helper_ei(t0, cpu_env);
gen_store_gpr(t0, rs);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
uint32_t op, minor, mips32_op;
uint32_t cond, fmt, cc;
- insn = lduw_code(ctx->pc + 2);
+ insn = cpu_lduw_code(env, ctx->pc + 2);
ctx->opcode = (ctx->opcode << 16) | insn;
rt = (ctx->opcode >> 21) & 0x1f;
MIPS_INVAL("PMON / selsl");
generate_exception(ctx, EXCP_RI);
#else
- gen_helper_0i(pmon, sa);
+ gen_helper_0e0i(pmon, sa);
#endif
break;
case OPC_SYSCALL:
save_cpu_state(ctx, 1);
gen_load_gpr(t0, rs);
- gen_helper_yield(t0, t0);
+ gen_helper_yield(t0, cpu_env, t0);
gen_store_gpr(t0, rd);
tcg_temp_free(t0);
}
break;
case OPC_DVPE:
check_insn(env, ctx, ASE_MT);
- gen_helper_dvpe(t0);
+ gen_helper_dvpe(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case OPC_EVPE:
check_insn(env, ctx, ASE_MT);
- gen_helper_evpe(t0);
+ gen_helper_evpe(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case OPC_DI:
check_insn(env, ctx, ISA_MIPS32R2);
save_cpu_state(ctx, 1);
- gen_helper_di(t0);
+ gen_helper_di(t0, cpu_env);
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case OPC_EI:
check_insn(env, ctx, ISA_MIPS32R2);
save_cpu_state(ctx, 1);
- gen_helper_ei(t0);
+ gen_helper_ei(t0, cpu_env);
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
- gen_helper_0i(raise_exception, EXCP_DEBUG);
+ gen_helper_0e0i(raise_exception, EXCP_DEBUG);
/* Include the breakpoint location or the tb won't
* be flushed when it must be. */
ctx.pc += 4;
is_branch = 0;
if (!(ctx.hflags & MIPS_HFLAG_M16)) {
- ctx.opcode = ldl_code(ctx.pc);
+ ctx.opcode = cpu_ldl_code(env, ctx.pc);
insn_bytes = 4;
decode_opc(env, &ctx, &is_branch);
} else if (env->insn_flags & ASE_MICROMIPS) {
- ctx.opcode = lduw_code(ctx.pc);
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
insn_bytes = decode_micromips_opc(env, &ctx, &is_branch);
} else if (env->insn_flags & ASE_MIPS16) {
- ctx.opcode = lduw_code(ctx.pc);
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
insn_bytes = decode_mips16_opc(env, &ctx, &is_branch);
} else {
generate_exception(&ctx, EXCP_RI);
gen_io_end();
if (env->singlestep_enabled && ctx.bstate != BS_BRANCH) {
save_cpu_state(&ctx, ctx.bstate == BS_NONE);
- gen_helper_0i(raise_exception, EXCP_DEBUG);
+ gen_helper_0e0i(raise_exception, EXCP_DEBUG);
} else {
switch (ctx.bstate) {
case BS_STOP:
#endif
}
-static inline uint64_t ld_code2(uint64_t pc)
+static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
- return (uint64_t)cpu_lduw_code(cpu_single_env, pc);
+ return (uint64_t)cpu_lduw_code(env, pc);
}
-static inline uint64_t ld_code4(uint64_t pc)
+static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
- return (uint64_t)cpu_ldl_code(cpu_single_env, pc);
+ return (uint64_t)cpu_ldl_code(env, pc);
}
-static inline uint64_t ld_code6(uint64_t pc)
+static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
uint64_t opc;
- opc = (uint64_t)cpu_lduw_code(cpu_single_env, pc) << 32;
- opc |= (uint64_t)(uint32_t)cpu_ldl_code(cpu_single_env, pc + 2);
+ opc = (uint64_t)cpu_lduw_code(env, pc) << 32;
+ opc |= (uint64_t)(uint32_t)cpu_ldl_code(env, pc + 2);
return opc;
}
#ifdef CONFIG_USER_ONLY
-static void gen_illegal_opcode(DisasContext *s, int ilc)
+static void gen_illegal_opcode(CPUS390XState *env, DisasContext *s, int ilc)
{
TCGv_i32 tmp = tcg_const_i32(EXCP_SPEC);
update_psw_addr(s);
#else /* CONFIG_USER_ONLY */
-static void debug_print_inst(DisasContext *s, int ilc)
+static void debug_print_inst(CPUS390XState *env, DisasContext *s, int ilc)
{
#ifdef DEBUG_ILLEGAL_INSTRUCTIONS
uint64_t inst = 0;
switch (ilc & 3) {
case 1:
- inst = ld_code2(s->pc);
+ inst = ld_code2(env, s->pc);
break;
case 2:
- inst = ld_code4(s->pc);
+ inst = ld_code4(env, s->pc);
break;
case 3:
- inst = ld_code6(s->pc);
+ inst = ld_code6(env, s->pc);
break;
}
#endif
}
-static void gen_program_exception(DisasContext *s, int ilc, int code)
+static void gen_program_exception(CPUS390XState *env, DisasContext *s, int ilc,
+ int code)
{
TCGv_i32 tmp;
- debug_print_inst(s, ilc);
+ debug_print_inst(env, s, ilc);
/* remember what pgm exception this was */
tmp = tcg_const_i32(code);
}
-static void gen_illegal_opcode(DisasContext *s, int ilc)
+static void gen_illegal_opcode(CPUS390XState *env, DisasContext *s, int ilc)
{
- gen_program_exception(s, ilc, PGM_SPECIFICATION);
+ gen_program_exception(env, s, ilc, PGM_SPECIFICATION);
}
-static void gen_privileged_exception(DisasContext *s, int ilc)
+static void gen_privileged_exception(CPUS390XState *env, DisasContext *s,
+ int ilc)
{
- gen_program_exception(s, ilc, PGM_PRIVILEGED);
+ gen_program_exception(env, s, ilc, PGM_PRIVILEGED);
}
-static void check_privileged(DisasContext *s, int ilc)
+static void check_privileged(CPUS390XState *env, DisasContext *s, int ilc)
{
if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
- gen_privileged_exception(s, ilc);
+ gen_privileged_exception(env, s, ilc);
}
}
set_cc_static(s);
}
-static void disas_e3(DisasContext* s, int op, int r1, int x2, int b2, int d2)
+static void disas_e3(CPUS390XState *env, DisasContext* s, int op, int r1,
+ int x2, int b2, int d2)
{
TCGv_i64 addr, tmp, tmp2, tmp3, tmp4;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
break;
default:
LOG_DISAS("illegal e3 operation 0x%x\n", op);
- gen_illegal_opcode(s, 3);
+ gen_illegal_opcode(env, s, 3);
break;
}
tcg_temp_free_i64(addr);
}
#ifndef CONFIG_USER_ONLY
-static void disas_e5(DisasContext* s, uint64_t insn)
+static void disas_e5(CPUS390XState *env, DisasContext* s, uint64_t insn)
{
TCGv_i64 tmp, tmp2;
int op = (insn >> 32) & 0xff;
break;
default:
LOG_DISAS("illegal e5 operation 0x%x\n", op);
- gen_illegal_opcode(s, 3);
+ gen_illegal_opcode(env, s, 3);
break;
}
}
#endif
-static void disas_eb(DisasContext *s, int op, int r1, int r3, int b2, int d2)
+static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
+ int r3, int b2, int d2)
{
TCGv_i64 tmp, tmp2, tmp3, tmp4;
TCGv_i32 tmp32_1, tmp32_2;
#ifndef CONFIG_USER_ONLY
case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
/* Load Control */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
tmp32_2 = tcg_const_i32(r3);
break;
case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
/* Store Control */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
tmp32_2 = tcg_const_i32(r3);
break;
default:
LOG_DISAS("illegal eb operation 0x%x\n", op);
- gen_illegal_opcode(s, ilc);
+ gen_illegal_opcode(env, s, ilc);
break;
}
}
-static void disas_ed(DisasContext *s, int op, int r1, int x2, int b2, int d2,
- int r1b)
+static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
+ int x2, int b2, int d2, int r1b)
{
TCGv_i32 tmp_r1, tmp32;
TCGv_i64 addr, tmp;
break;
default:
LOG_DISAS("illegal ed operation 0x%x\n", op);
- gen_illegal_opcode(s, 3);
+ gen_illegal_opcode(env, s, 3);
return;
}
tcg_temp_free_i32(tmp_r1);
tcg_temp_free_i64(addr);
}
-static void disas_a5(DisasContext *s, int op, int r1, int i2)
+static void disas_a5(CPUS390XState *env, DisasContext *s, int op, int r1,
+ int i2)
{
TCGv_i64 tmp, tmp2;
TCGv_i32 tmp32;
break;
default:
LOG_DISAS("illegal a5 operation 0x%x\n", op);
- gen_illegal_opcode(s, 2);
+ gen_illegal_opcode(env, s, 2);
return;
}
}
-static void disas_a7(DisasContext *s, int op, int r1, int i2)
+static void disas_a7(CPUS390XState *env, DisasContext *s, int op, int r1,
+ int i2)
{
TCGv_i64 tmp, tmp2;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
break;
default:
LOG_DISAS("illegal a7 operation 0x%x\n", op);
- gen_illegal_opcode(s, 2);
+ gen_illegal_opcode(env, s, 2);
return;
}
}
-static void disas_b2(DisasContext *s, int op, uint32_t insn)
+static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
+ uint32_t insn)
{
TCGv_i64 tmp, tmp2, tmp3;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
#ifndef CONFIG_USER_ONLY
case 0x02: /* STIDP D2(B2) [S] */
/* Store CPU ID */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x04: /* SCK D2(B2) [S] */
/* Set Clock */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x06: /* SCKC D2(B2) [S] */
/* Set Clock Comparator */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x07: /* STCKC D2(B2) [S] */
/* Store Clock Comparator */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x08: /* SPT D2(B2) [S] */
/* Set CPU Timer */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x09: /* STPT D2(B2) [S] */
/* Store CPU Timer */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x0a: /* SPKA D2(B2) [S] */
/* Set PSW Key from Address */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = tcg_temp_new_i64();
break;
case 0x0d: /* PTLB [S] */
/* Purge TLB */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
gen_helper_ptlb(cpu_env);
break;
case 0x10: /* SPX D2(B2) [S] */
/* Set Prefix Register */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
break;
case 0x11: /* STPX D2(B2) [S] */
/* Store Prefix */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = tcg_temp_new_i64();
break;
case 0x12: /* STAP D2(B2) [S] */
/* Store CPU Address */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = tcg_temp_new_i64();
break;
case 0x21: /* IPTE R1,R2 [RRE] */
/* Invalidate PTE */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
tmp = load_reg(r1);
break;
case 0x29: /* ISKE R1,R2 [RRE] */
/* Insert Storage Key Extended */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
tmp = load_reg(r2);
break;
case 0x2a: /* RRBE R1,R2 [RRE] */
/* Reset Reference Bit Extended */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
tmp32_1 = load_reg32(r1);
break;
case 0x2b: /* SSKE R1,R2 [RRE] */
/* Set Storage Key Extended */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
tmp32_1 = load_reg32(r1);
break;
case 0x34: /* STCH ? */
/* Store Subchannel */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
gen_op_movi_cc(s, 3);
break;
case 0x46: /* STURA R1,R2 [RRE] */
/* Store Using Real Address */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
tmp32_1 = load_reg32(r1);
break;
case 0x50: /* CSP R1,R2 [RRE] */
/* Compare And Swap And Purge */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
tmp32_1 = tcg_const_i32(r1);
break;
case 0x5f: /* CHSC ? */
/* Channel Subsystem Call */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
gen_op_movi_cc(s, 3);
break;
case 0x78: /* STCKE D2(B2) [S] */
break;
case 0x79: /* SACF D2(B2) [S] */
/* Set Address Space Control Fast */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
potential_page_fault(s);
s->is_jmp = DISAS_EXCP;
break;
case 0x7d: /* STSI D2,(B2) [S] */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = load_reg32(0);
break;
case 0xb1: /* STFL D2(B2) [S] */
/* Store Facility List (CPU features) at 200 */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
tmp2 = tcg_const_i64(0xc0000000);
tmp = tcg_const_i64(200);
tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
break;
case 0xb2: /* LPSWE D2(B2) [S] */
/* Load PSW Extended */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = tcg_temp_new_i64();
break;
case 0x20: /* SERVC R1,R2 [RRE] */
/* SCLP Service call (PV hypercall) */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
potential_page_fault(s);
tmp32_1 = load_reg32(r2);
tmp = load_reg(r1);
#endif
default:
LOG_DISAS("illegal b2 operation 0x%x\n", op);
- gen_illegal_opcode(s, ilc);
+ gen_illegal_opcode(env, s, ilc);
break;
}
}
-static void disas_b3(DisasContext *s, int op, int m3, int r1, int r2)
+static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
+ int r1, int r2)
{
TCGv_i64 tmp;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
break;
default:
LOG_DISAS("illegal b3 operation 0x%x\n", op);
- gen_illegal_opcode(s, 2);
+ gen_illegal_opcode(env, s, 2);
break;
}
#undef FP_HELPER
}
-static void disas_b9(DisasContext *s, int op, int r1, int r2)
+static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
+ int r2)
{
TCGv_i64 tmp, tmp2, tmp3;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
break;
default:
LOG_DISAS("illegal b9 operation 0x%x\n", op);
- gen_illegal_opcode(s, 2);
+ gen_illegal_opcode(env, s, 2);
break;
}
}
-static void disas_c0(DisasContext *s, int op, int r1, int i2)
+static void disas_c0(CPUS390XState *env, DisasContext *s, int op, int r1, int i2)
{
TCGv_i64 tmp;
TCGv_i32 tmp32_1, tmp32_2;
break;
default:
LOG_DISAS("illegal c0 operation 0x%x\n", op);
- gen_illegal_opcode(s, 3);
+ gen_illegal_opcode(env, s, 3);
break;
}
}
-static void disas_c2(DisasContext *s, int op, int r1, int i2)
+static void disas_c2(CPUS390XState *env, DisasContext *s, int op, int r1,
+ int i2)
{
TCGv_i64 tmp, tmp2, tmp3;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
break;
default:
LOG_DISAS("illegal c2 operation 0x%x\n", op);
- gen_illegal_opcode(s, 3);
+ gen_illegal_opcode(env, s, 3);
break;
}
}
}
}
-static void disas_s390_insn(DisasContext *s)
+static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
{
TCGv_i64 tmp, tmp2, tmp3, tmp4;
TCGv_i32 tmp32_1, tmp32_2, tmp32_3, tmp32_4;
int ilc;
int l1;
- opc = cpu_ldub_code(cpu_single_env, s->pc);
+ opc = cpu_ldub_code(env, s->pc);
LOG_DISAS("opc 0x%x\n", opc);
ilc = get_ilc(opc);
switch (opc) {
#ifndef CONFIG_USER_ONLY
case 0x01: /* SAM */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
/* set addressing mode, but we only do 64bit anyways */
break;
#endif
case 0x6: /* BCTR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r1);
tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
}
break;
case 0x7: /* BCR M1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
if (r2) {
tmp = load_reg(r2);
}
break;
case 0xa: /* SVC I [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
debug_insn(insn);
i = insn & 0xff;
update_psw_addr(s);
tcg_temp_free_i32(tmp32_3);
break;
case 0xd: /* BASR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 2));
store_reg(r1, tmp);
tcg_temp_free_i64(tmp);
break;
case 0xe: /* MVCL R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = tcg_const_i32(r1);
tmp32_2 = tcg_const_i32(r2);
tcg_temp_free_i32(tmp32_2);
break;
case 0x10: /* LPR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r2);
set_cc_abs32(s, tmp32_1);
tcg_temp_free_i32(tmp32_1);
break;
case 0x11: /* LNR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r2);
set_cc_nabs32(s, tmp32_1);
tcg_temp_free_i32(tmp32_1);
break;
case 0x12: /* LTR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r2);
if (r1 != r2) {
tcg_temp_free_i32(tmp32_1);
break;
case 0x13: /* LCR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r2);
tcg_gen_neg_i32(tmp32_1, tmp32_1);
case 0x14: /* NR R1,R2 [RR] */
case 0x16: /* OR R1,R2 [RR] */
case 0x17: /* XR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_2 = load_reg32(r2);
tmp32_1 = load_reg32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0x18: /* LR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r2);
store_reg32(r1, tmp32_1);
break;
case 0x15: /* CLR R1,R2 [RR] */
case 0x19: /* CR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r1);
tmp32_2 = load_reg32(r2);
break;
case 0x1a: /* AR R1,R2 [RR] */
case 0x1e: /* ALR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r1);
tmp32_2 = load_reg32(r2);
break;
case 0x1b: /* SR R1,R2 [RR] */
case 0x1f: /* SLR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r1);
tmp32_2 = load_reg32(r2);
break;
case 0x1c: /* MR R1,R2 [RR] */
/* reg(r1, r1+1) = reg(r1+1) * reg(r2) */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp2 = load_reg(r2);
tmp3 = load_reg((r1 + 1) & 15);
tcg_temp_free_i64(tmp3);
break;
case 0x1d: /* DR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_reg32(r1);
tmp32_2 = load_reg32(r1 + 1);
tcg_temp_free_i64(tmp3);
break;
case 0x28: /* LDR R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp = load_freg(r2);
store_freg(r1, tmp);
tcg_temp_free_i64(tmp);
break;
case 0x38: /* LER R1,R2 [RR] */
- insn = ld_code2(s->pc);
+ insn = ld_code2(env, s->pc);
decode_rr(s, insn, &r1, &r2);
tmp32_1 = load_freg32(r2);
store_freg32(r1, tmp32_1);
tcg_temp_free_i32(tmp32_1);
break;
case 0x40: /* STH R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = load_reg(r1);
tcg_gen_qemu_st16(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x41: /* la */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
store_reg(r1, tmp); /* FIXME: 31/24-bit addressing */
tcg_temp_free_i64(tmp);
break;
case 0x42: /* STC R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = load_reg(r1);
tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x43: /* IC R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x44: /* EX R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = load_reg(r1);
tmp3 = tcg_const_i64(s->pc + 4);
tcg_temp_free_i64(tmp3);
break;
case 0x46: /* BCT R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tcg_temp_free_i64(tmp);
tcg_temp_free_i64(tmp);
break;
case 0x47: /* BC M1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
gen_bcr(s, r1, tmp, s->pc + 4);
tcg_temp_free_i64(tmp);
s->is_jmp = DISAS_TB_JUMP;
break;
case 0x48: /* LH R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x49: /* CH R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp32_1 = load_reg32(r1);
tmp32_2 = tcg_temp_new_i32();
case 0x4a: /* AH R1,D2(X2,B2) [RX] */
case 0x4b: /* SH R1,D2(X2,B2) [RX] */
case 0x4c: /* MH R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = load_reg32(r1);
tcg_temp_free_i64(tmp2);
break;
case 0x4d: /* BAS R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_const_i64(pc_to_link_info(s, s->pc + 4));
store_reg(r1, tmp2);
s->is_jmp = DISAS_JUMP;
break;
case 0x4e: /* CVD R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = tcg_temp_new_i32();
tcg_temp_free_i32(tmp32_1);
break;
case 0x50: /* st r1, d2(x2, b2) */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = load_reg(r1);
tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x55: /* CL R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = tcg_temp_new_i32();
case 0x54: /* N R1,D2(X2,B2) [RX] */
case 0x56: /* O R1,D2(X2,B2) [RX] */
case 0x57: /* X R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = load_reg32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0x58: /* l r1, d2(x2, b2) */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = tcg_temp_new_i32();
tcg_temp_free_i32(tmp32_1);
break;
case 0x59: /* C R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = tcg_temp_new_i32();
case 0x5b: /* S R1,D2(X2,B2) [RX] */
case 0x5e: /* AL R1,D2(X2,B2) [RX] */
case 0x5f: /* SL R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp32_1 = load_reg32(r1);
tmp32_2 = tcg_temp_new_i32();
break;
case 0x5c: /* M R1,D2(X2,B2) [RX] */
/* reg(r1, r1+1) = reg(r1+1) * *(s32*)addr */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp3);
break;
case 0x5d: /* D R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp3 = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp32_1 = load_reg32(r1);
tmp32_2 = load_reg32(r1 + 1);
tcg_temp_free_i64(tmp3);
break;
case 0x60: /* STD R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = load_freg(r1);
tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x68: /* LD R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x70: /* STE R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = load_freg32(r1);
tcg_temp_free_i32(tmp32_1);
break;
case 0x71: /* MS R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = load_reg32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0x78: /* LE R1,D2(X2,B2) [RX] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp2 = tcg_temp_new_i64();
tmp32_1 = tcg_temp_new_i32();
#ifndef CONFIG_USER_ONLY
case 0x80: /* SSM D2(B2) [S] */
/* Set System Mask */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = tcg_temp_new_i64();
break;
case 0x82: /* LPSW D2(B2) [S] */
/* Load PSW */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = tcg_temp_new_i64();
break;
case 0x83: /* DIAG R1,R3,D2 [RS] */
/* Diagnose call (KVM hypercall) */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
potential_page_fault(s);
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp32_1 = tcg_const_i32(insn & 0xfff);
tmp2 = load_reg(2);
case 0x88: /* SRL R1,D2(B2) [RS] */
case 0x89: /* SLL R1,D2(B2) [RS] */
case 0x8a: /* SRA R1,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = load_reg32(r1);
case 0x8c: /* SRDL R1,D2(B2) [RS] */
case 0x8d: /* SLDL R1,D2(B2) [RS] */
case 0x8e: /* SRDA R1,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2); /* shift */
tmp2 = tcg_temp_new_i64();
break;
case 0x98: /* LM R1,R3,D2(B2) [RS] */
case 0x90: /* STM R1,R3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tcg_temp_free_i64(tmp4);
break;
case 0x91: /* TM D1(B1),I2 [SI] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_si(s, insn, &i2, &b1, &d1);
tmp2 = tcg_const_i64(i2);
tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x92: /* MVI D1(B1),I2 [SI] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_si(s, insn, &i2, &b1, &d1);
tmp2 = tcg_const_i64(i2);
tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
case 0x94: /* NI D1(B1),I2 [SI] */
case 0x96: /* OI D1(B1),I2 [SI] */
case 0x97: /* XI D1(B1),I2 [SI] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_si(s, insn, &i2, &b1, &d1);
tmp2 = tcg_temp_new_i64();
tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x95: /* CLI D1(B1),I2 [SI] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
tmp = decode_si(s, insn, &i2, &b1, &d1);
tmp2 = tcg_temp_new_i64();
tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
tcg_temp_free_i64(tmp2);
break;
case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0xa5:
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
r1 = (insn >> 20) & 0xf;
op = (insn >> 16) & 0xf;
i2 = insn & 0xffff;
- disas_a5(s, op, r1, i2);
+ disas_a5(env, s, op, r1, i2);
break;
case 0xa7:
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
r1 = (insn >> 20) & 0xf;
op = (insn >> 16) & 0xf;
i2 = (short)insn;
- disas_a7(s, op, r1, i2);
+ disas_a7(env, s, op, r1, i2);
break;
case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
#ifndef CONFIG_USER_ONLY
case 0xac: /* STNSM D1(B1),I2 [SI] */
case 0xad: /* STOSM D1(B1),I2 [SI] */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
tmp = decode_si(s, insn, &i2, &b1, &d1);
tmp2 = tcg_temp_new_i64();
tcg_gen_shri_i64(tmp2, psw_mask, 56);
tcg_temp_free_i64(tmp2);
break;
case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp2 = load_reg(r3);
tcg_temp_free_i32(tmp32_1);
break;
case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
tmp32_1 = tcg_const_i32(r1);
potential_page_fault(s);
break;
#endif
case 0xb2:
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
op = (insn >> 16) & 0xff;
switch (op) {
case 0x9c: /* STFPC D2(B2) [S] */
tcg_temp_free_i64(tmp2);
break;
default:
- disas_b2(s, op, insn);
+ disas_b2(env, s, op, insn);
break;
}
break;
case 0xb3:
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
op = (insn >> 16) & 0xff;
r3 = (insn >> 12) & 0xf; /* aka m3 */
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
- disas_b3(s, op, r3, r1, r2);
+ disas_b3(env, s, op, r3, r1, r2);
break;
#ifndef CONFIG_USER_ONLY
case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
/* Store Control */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
break;
case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
/* Load Control */
- check_privileged(s, ilc);
- insn = ld_code4(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
break;
#endif
case 0xb9:
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
r1 = (insn >> 4) & 0xf;
r2 = insn & 0xf;
op = (insn >> 16) & 0xff;
- disas_b9(s, op, r1, r2);
+ disas_b9(env, s, op, r1, r2);
break;
case 0xba: /* CS R1,R3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = tcg_const_i32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = load_reg32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
tmp = get_address(s, 0, b2, d2);
tmp32_1 = load_reg32(r1);
tcg_temp_free_i32(tmp32_2);
break;
case 0xbf: /* ICM R1,M3,D2(B2) [RS] */
- insn = ld_code4(s->pc);
+ insn = ld_code4(env, s->pc);
decode_rs(s, insn, &r1, &r3, &b2, &d2);
if (r3 == 15) {
/* effectively a 32-bit load */
break;
case 0xc0:
case 0xc2:
- insn = ld_code6(s->pc);
+ insn = ld_code6(env, s->pc);
r1 = (insn >> 36) & 0xf;
op = (insn >> 32) & 0xf;
i2 = (int)insn;
switch (opc) {
case 0xc0:
- disas_c0(s, op, r1, i2);
+ disas_c0(env, s, op, r1, i2);
break;
case 0xc2:
- disas_c2(s, op, r1, i2);
+ disas_c2(env, s, op, r1, i2);
break;
default:
tcg_abort();
case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
- insn = ld_code6(s->pc);
+ insn = ld_code6(env, s->pc);
vl = tcg_const_i32((insn >> 32) & 0xff);
b1 = (insn >> 28) & 0xf;
b2 = (insn >> 12) & 0xf;
#ifndef CONFIG_USER_ONLY
case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
- check_privileged(s, ilc);
+ check_privileged(env, s, ilc);
potential_page_fault(s);
- insn = ld_code6(s->pc);
+ insn = ld_code6(env, s->pc);
r1 = (insn >> 36) & 0xf;
r3 = (insn >> 32) & 0xf;
b1 = (insn >> 28) & 0xf;
break;
#endif
case 0xe3:
- insn = ld_code6(s->pc);
+ insn = ld_code6(env, s->pc);
debug_insn(insn);
op = insn & 0xff;
r1 = (insn >> 36) & 0xf;
b2 = (insn >> 28) & 0xf;
d2 = ((int)((((insn >> 16) & 0xfff)
| ((insn << 4) & 0xff000)) << 12)) >> 12;
- disas_e3(s, op, r1, x2, b2, d2 );
+ disas_e3(env, s, op, r1, x2, b2, d2 );
break;
#ifndef CONFIG_USER_ONLY
case 0xe5:
/* Test Protection */
- check_privileged(s, ilc);
- insn = ld_code6(s->pc);
+ check_privileged(env, s, ilc);
+ insn = ld_code6(env, s->pc);
debug_insn(insn);
- disas_e5(s, insn);
+ disas_e5(env, s, insn);
break;
#endif
case 0xeb:
- insn = ld_code6(s->pc);
+ insn = ld_code6(env, s->pc);
debug_insn(insn);
op = insn & 0xff;
r1 = (insn >> 36) & 0xf;
b2 = (insn >> 28) & 0xf;
d2 = ((int)((((insn >> 16) & 0xfff)
| ((insn << 4) & 0xff000)) << 12)) >> 12;
- disas_eb(s, op, r1, r3, b2, d2);
+ disas_eb(env, s, op, r1, r3, b2, d2);
break;
case 0xed:
- insn = ld_code6(s->pc);
+ insn = ld_code6(env, s->pc);
debug_insn(insn);
op = insn & 0xff;
r1 = (insn >> 36) & 0xf;
b2 = (insn >> 28) & 0xf;
d2 = (short)((insn >> 16) & 0xfff);
r1b = (insn >> 12) & 0xf;
- disas_ed(s, op, r1, x2, b2, d2, r1b);
+ disas_ed(env, s, op, r1, x2, b2, d2, r1b);
break;
default:
qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
- gen_illegal_opcode(s, ilc);
+ gen_illegal_opcode(env, s, ilc);
break;
}
LOG_DISAS("pc " TARGET_FMT_lx "\n",
dc.pc);
#endif
- disas_s390_insn(&dc);
+ disas_s390_insn(env, &dc);
num_insns++;
if (env->singlestep_enabled) {
obj-y += translate.o op_helper.o helper.o cpu.o
obj-$(CONFIG_SOFTMMU) += machine.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
#include "def-helper.h"
-DEF_HELPER_0(ldtlb, void)
-DEF_HELPER_0(raise_illegal_instruction, void)
-DEF_HELPER_0(raise_slot_illegal_instruction, void)
-DEF_HELPER_0(raise_fpu_disable, void)
-DEF_HELPER_0(raise_slot_fpu_disable, void)
-DEF_HELPER_0(debug, void)
-DEF_HELPER_1(sleep, void, i32)
-DEF_HELPER_1(trapa, void, i32)
+DEF_HELPER_1(ldtlb, void, env)
+DEF_HELPER_1(raise_illegal_instruction, void, env)
+DEF_HELPER_1(raise_slot_illegal_instruction, void, env)
+DEF_HELPER_1(raise_fpu_disable, void, env)
+DEF_HELPER_1(raise_slot_fpu_disable, void, env)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_2(sleep, void, env, i32)
+DEF_HELPER_2(trapa, void, env, i32)
-DEF_HELPER_2(movcal, void, i32, i32)
-DEF_HELPER_0(discard_movcal_backup, void)
-DEF_HELPER_1(ocbi, void, i32)
+DEF_HELPER_3(movcal, void, env, i32, i32)
+DEF_HELPER_1(discard_movcal_backup, void, env)
+DEF_HELPER_2(ocbi, void, env, i32)
-DEF_HELPER_2(addv, i32, i32, i32)
-DEF_HELPER_2(addc, i32, i32, i32)
-DEF_HELPER_2(subv, i32, i32, i32)
-DEF_HELPER_2(subc, i32, i32, i32)
-DEF_HELPER_2(div1, i32, i32, i32)
-DEF_HELPER_2(macl, void, i32, i32)
-DEF_HELPER_2(macw, void, i32, i32)
+DEF_HELPER_3(addv, i32, env, i32, i32)
+DEF_HELPER_3(addc, i32, env, i32, i32)
+DEF_HELPER_3(subv, i32, env, i32, i32)
+DEF_HELPER_3(subc, i32, env, i32, i32)
+DEF_HELPER_3(div1, i32, env, i32, i32)
+DEF_HELPER_3(macl, void, env, i32, i32)
+DEF_HELPER_3(macw, void, env, i32, i32)
-DEF_HELPER_1(ld_fpscr, void, i32)
+DEF_HELPER_2(ld_fpscr, void, env, i32)
DEF_HELPER_1(fabs_FT, f32, f32)
DEF_HELPER_1(fabs_DT, f64, f64)
-DEF_HELPER_2(fadd_FT, f32, f32, f32)
-DEF_HELPER_2(fadd_DT, f64, f64, f64)
-DEF_HELPER_1(fcnvsd_FT_DT, f64, f32)
-DEF_HELPER_1(fcnvds_DT_FT, f32, f64)
+DEF_HELPER_3(fadd_FT, f32, env, f32, f32)
+DEF_HELPER_3(fadd_DT, f64, env, f64, f64)
+DEF_HELPER_2(fcnvsd_FT_DT, f64, env, f32)
+DEF_HELPER_2(fcnvds_DT_FT, f32, env, f64)
-DEF_HELPER_2(fcmp_eq_FT, void, f32, f32)
-DEF_HELPER_2(fcmp_eq_DT, void, f64, f64)
-DEF_HELPER_2(fcmp_gt_FT, void, f32, f32)
-DEF_HELPER_2(fcmp_gt_DT, void, f64, f64)
-DEF_HELPER_2(fdiv_FT, f32, f32, f32)
-DEF_HELPER_2(fdiv_DT, f64, f64, f64)
-DEF_HELPER_1(float_FT, f32, i32)
-DEF_HELPER_1(float_DT, f64, i32)
-DEF_HELPER_3(fmac_FT, f32, f32, f32, f32)
-DEF_HELPER_2(fmul_FT, f32, f32, f32)
-DEF_HELPER_2(fmul_DT, f64, f64, f64)
+DEF_HELPER_3(fcmp_eq_FT, void, env, f32, f32)
+DEF_HELPER_3(fcmp_eq_DT, void, env, f64, f64)
+DEF_HELPER_3(fcmp_gt_FT, void, env, f32, f32)
+DEF_HELPER_3(fcmp_gt_DT, void, env, f64, f64)
+DEF_HELPER_3(fdiv_FT, f32, env, f32, f32)
+DEF_HELPER_3(fdiv_DT, f64, env, f64, f64)
+DEF_HELPER_2(float_FT, f32, env, i32)
+DEF_HELPER_2(float_DT, f64, env, i32)
+DEF_HELPER_4(fmac_FT, f32, env, f32, f32, f32)
+DEF_HELPER_3(fmul_FT, f32, env, f32, f32)
+DEF_HELPER_3(fmul_DT, f64, env, f64, f64)
DEF_HELPER_1(fneg_T, f32, f32)
-DEF_HELPER_2(fsub_FT, f32, f32, f32)
-DEF_HELPER_2(fsub_DT, f64, f64, f64)
-DEF_HELPER_1(fsqrt_FT, f32, f32)
-DEF_HELPER_1(fsqrt_DT, f64, f64)
-DEF_HELPER_1(ftrc_FT, i32, f32)
-DEF_HELPER_1(ftrc_DT, i32, f64)
-DEF_HELPER_2(fipr, void, i32, i32)
-DEF_HELPER_1(ftrv, void, i32)
+DEF_HELPER_3(fsub_FT, f32, env, f32, f32)
+DEF_HELPER_3(fsub_DT, f64, env, f64, f64)
+DEF_HELPER_2(fsqrt_FT, f32, env, f32)
+DEF_HELPER_2(fsqrt_DT, f64, env, f64)
+DEF_HELPER_2(ftrc_FT, i32, env, f32)
+DEF_HELPER_2(ftrc_DT, i32, env, f64)
+DEF_HELPER_3(fipr, void, env, i32, i32)
+DEF_HELPER_2(ftrv, void, env, i32)
#include "def-helper.h"
#include <assert.h>
#include <stdlib.h>
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helper.h"
-static void cpu_restore_state_from_retaddr(uintptr_t retaddr)
+static void cpu_restore_state_from_retaddr(CPUSH4State *env, uintptr_t retaddr)
{
TranslationBlock *tb;
#define SHIFT 3
#include "softmmu_template.h"
-void tlb_fill(CPUSH4State *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUSH4State *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
- CPUSH4State *saved_env;
int ret;
- saved_env = env;
- env = env1;
ret = cpu_sh4_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (ret) {
/* now we have a real cpu fault */
- cpu_restore_state_from_retaddr(retaddr);
+ cpu_restore_state_from_retaddr(env, retaddr);
cpu_loop_exit(env);
}
- env = saved_env;
}
#endif
-void helper_ldtlb(void)
+void helper_ldtlb(CPUSH4State *env)
{
#ifdef CONFIG_USER_ONLY
/* XXXXX */
#endif
}
-static inline void raise_exception(int index, uintptr_t retaddr)
+static inline void raise_exception(CPUSH4State *env, int index,
+ uintptr_t retaddr)
{
env->exception_index = index;
- cpu_restore_state_from_retaddr(retaddr);
+ cpu_restore_state_from_retaddr(env, retaddr);
cpu_loop_exit(env);
}
-void helper_raise_illegal_instruction(void)
+void helper_raise_illegal_instruction(CPUSH4State *env)
{
- raise_exception(0x180, GETPC());
+ raise_exception(env, 0x180, GETPC());
}
-void helper_raise_slot_illegal_instruction(void)
+void helper_raise_slot_illegal_instruction(CPUSH4State *env)
{
- raise_exception(0x1a0, GETPC());
+ raise_exception(env, 0x1a0, GETPC());
}
-void helper_raise_fpu_disable(void)
+void helper_raise_fpu_disable(CPUSH4State *env)
{
- raise_exception(0x800, GETPC());
+ raise_exception(env, 0x800, GETPC());
}
-void helper_raise_slot_fpu_disable(void)
+void helper_raise_slot_fpu_disable(CPUSH4State *env)
{
- raise_exception(0x820, GETPC());
+ raise_exception(env, 0x820, GETPC());
}
-void helper_debug(void)
+void helper_debug(CPUSH4State *env)
{
env->exception_index = EXCP_DEBUG;
cpu_loop_exit(env);
}
-void helper_sleep(uint32_t next_pc)
+void helper_sleep(CPUSH4State *env, uint32_t next_pc)
{
env->halted = 1;
env->in_sleep = 1;
cpu_loop_exit(env);
}
-void helper_trapa(uint32_t tra)
+void helper_trapa(CPUSH4State *env, uint32_t tra)
{
env->tra = tra << 2;
- raise_exception(0x160, GETPC());
+ raise_exception(env, 0x160, GETPC());
}
-void helper_movcal(uint32_t address, uint32_t value)
+void helper_movcal(CPUSH4State *env, uint32_t address, uint32_t value)
{
if (cpu_sh4_is_cached (env, address))
{
}
}
-void helper_discard_movcal_backup(void)
+void helper_discard_movcal_backup(CPUSH4State *env)
{
memory_content *current = env->movcal_backup;
}
}
-void helper_ocbi(uint32_t address)
+void helper_ocbi(CPUSH4State *env, uint32_t address)
{
memory_content **current = &(env->movcal_backup);
while (*current)
if ((a & ~0x1F) == (address & ~0x1F))
{
memory_content *next = (*current)->next;
- stl(a, (*current)->value);
+ cpu_stl_data(env, a, (*current)->value);
if (next == NULL)
{
}
}
-uint32_t helper_addc(uint32_t arg0, uint32_t arg1)
+uint32_t helper_addc(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
uint32_t tmp0, tmp1;
return arg1;
}
-uint32_t helper_addv(uint32_t arg0, uint32_t arg1)
+uint32_t helper_addv(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
uint32_t dest, src, ans;
#define SETM env->sr |= SR_M
#define CLRM env->sr &= ~SR_M
-uint32_t helper_div1(uint32_t arg0, uint32_t arg1)
+uint32_t helper_div1(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
uint32_t tmp0, tmp2;
uint8_t old_q, tmp1 = 0xff;
return arg1;
}
-void helper_macl(uint32_t arg0, uint32_t arg1)
+void helper_macl(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
int64_t res;
}
}
-void helper_macw(uint32_t arg0, uint32_t arg1)
+void helper_macw(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
int64_t res;
}
}
-uint32_t helper_subc(uint32_t arg0, uint32_t arg1)
+uint32_t helper_subc(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
uint32_t tmp0, tmp1;
return arg1;
}
-uint32_t helper_subv(uint32_t arg0, uint32_t arg1)
+uint32_t helper_subv(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
{
int32_t dest, src, ans;
return arg1;
}
-static inline void set_t(void)
+static inline void set_t(CPUSH4State *env)
{
env->sr |= SR_T;
}
-static inline void clr_t(void)
+static inline void clr_t(CPUSH4State *env)
{
env->sr &= ~SR_T;
}
-void helper_ld_fpscr(uint32_t val)
+void helper_ld_fpscr(CPUSH4State *env, uint32_t val)
{
env->fpscr = val & FPSCR_MASK;
if ((val & FPSCR_RM_MASK) == FPSCR_RM_ZERO) {
set_flush_to_zero((val & FPSCR_DN) != 0, &env->fp_status);
}
-static void update_fpscr(uintptr_t retaddr)
+static void update_fpscr(CPUSH4State *env, uintptr_t retaddr)
{
int xcpt, cause, enable;
cause = (env->fpscr & FPSCR_CAUSE_MASK) >> FPSCR_CAUSE_SHIFT;
enable = (env->fpscr & FPSCR_ENABLE_MASK) >> FPSCR_ENABLE_SHIFT;
if (cause & enable) {
- cpu_restore_state_from_retaddr(retaddr);
+ cpu_restore_state_from_retaddr(env, retaddr);
env->exception_index = 0x120;
cpu_loop_exit(env);
}
return float64_abs(t0);
}
-float32 helper_fadd_FT(float32 t0, float32 t1)
+float32 helper_fadd_FT(CPUSH4State *env, float32 t0, float32 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float32_add(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float64 helper_fadd_DT(float64 t0, float64 t1)
+float64 helper_fadd_DT(CPUSH4State *env, float64 t0, float64 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float64_add(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-void helper_fcmp_eq_FT(float32 t0, float32 t1)
+void helper_fcmp_eq_FT(CPUSH4State *env, float32 t0, float32 t1)
{
int relation;
set_float_exception_flags(0, &env->fp_status);
relation = float32_compare(t0, t1, &env->fp_status);
if (unlikely(relation == float_relation_unordered)) {
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
} else if (relation == float_relation_equal) {
- set_t();
+ set_t(env);
} else {
- clr_t();
+ clr_t(env);
}
}
-void helper_fcmp_eq_DT(float64 t0, float64 t1)
+void helper_fcmp_eq_DT(CPUSH4State *env, float64 t0, float64 t1)
{
int relation;
set_float_exception_flags(0, &env->fp_status);
relation = float64_compare(t0, t1, &env->fp_status);
if (unlikely(relation == float_relation_unordered)) {
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
} else if (relation == float_relation_equal) {
- set_t();
+ set_t(env);
} else {
- clr_t();
+ clr_t(env);
}
}
-void helper_fcmp_gt_FT(float32 t0, float32 t1)
+void helper_fcmp_gt_FT(CPUSH4State *env, float32 t0, float32 t1)
{
int relation;
set_float_exception_flags(0, &env->fp_status);
relation = float32_compare(t0, t1, &env->fp_status);
if (unlikely(relation == float_relation_unordered)) {
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
} else if (relation == float_relation_greater) {
- set_t();
+ set_t(env);
} else {
- clr_t();
+ clr_t(env);
}
}
-void helper_fcmp_gt_DT(float64 t0, float64 t1)
+void helper_fcmp_gt_DT(CPUSH4State *env, float64 t0, float64 t1)
{
int relation;
set_float_exception_flags(0, &env->fp_status);
relation = float64_compare(t0, t1, &env->fp_status);
if (unlikely(relation == float_relation_unordered)) {
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
} else if (relation == float_relation_greater) {
- set_t();
+ set_t(env);
} else {
- clr_t();
+ clr_t(env);
}
}
-float64 helper_fcnvsd_FT_DT(float32 t0)
+float64 helper_fcnvsd_FT_DT(CPUSH4State *env, float32 t0)
{
float64 ret;
set_float_exception_flags(0, &env->fp_status);
ret = float32_to_float64(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return ret;
}
-float32 helper_fcnvds_DT_FT(float64 t0)
+float32 helper_fcnvds_DT_FT(CPUSH4State *env, float64 t0)
{
float32 ret;
set_float_exception_flags(0, &env->fp_status);
ret = float64_to_float32(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return ret;
}
-float32 helper_fdiv_FT(float32 t0, float32 t1)
+float32 helper_fdiv_FT(CPUSH4State *env, float32 t0, float32 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float32_div(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float64 helper_fdiv_DT(float64 t0, float64 t1)
+float64 helper_fdiv_DT(CPUSH4State *env, float64 t0, float64 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float64_div(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float32 helper_float_FT(uint32_t t0)
+float32 helper_float_FT(CPUSH4State *env, uint32_t t0)
{
float32 ret;
set_float_exception_flags(0, &env->fp_status);
ret = int32_to_float32(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return ret;
}
-float64 helper_float_DT(uint32_t t0)
+float64 helper_float_DT(CPUSH4State *env, uint32_t t0)
{
float64 ret;
set_float_exception_flags(0, &env->fp_status);
ret = int32_to_float64(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return ret;
}
-float32 helper_fmac_FT(float32 t0, float32 t1, float32 t2)
+float32 helper_fmac_FT(CPUSH4State *env, float32 t0, float32 t1, float32 t2)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float32_mul(t0, t1, &env->fp_status);
t0 = float32_add(t0, t2, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float32 helper_fmul_FT(float32 t0, float32 t1)
+float32 helper_fmul_FT(CPUSH4State *env, float32 t0, float32 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float32_mul(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float64 helper_fmul_DT(float64 t0, float64 t1)
+float64 helper_fmul_DT(CPUSH4State *env, float64 t0, float64 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float64_mul(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
return float32_chs(t0);
}
-float32 helper_fsqrt_FT(float32 t0)
+float32 helper_fsqrt_FT(CPUSH4State *env, float32 t0)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float32_sqrt(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float64 helper_fsqrt_DT(float64 t0)
+float64 helper_fsqrt_DT(CPUSH4State *env, float64 t0)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float64_sqrt(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float32 helper_fsub_FT(float32 t0, float32 t1)
+float32 helper_fsub_FT(CPUSH4State *env, float32 t0, float32 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float32_sub(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-float64 helper_fsub_DT(float64 t0, float64 t1)
+float64 helper_fsub_DT(CPUSH4State *env, float64 t0, float64 t1)
{
set_float_exception_flags(0, &env->fp_status);
t0 = float64_sub(t0, t1, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return t0;
}
-uint32_t helper_ftrc_FT(float32 t0)
+uint32_t helper_ftrc_FT(CPUSH4State *env, float32 t0)
{
uint32_t ret;
set_float_exception_flags(0, &env->fp_status);
ret = float32_to_int32_round_to_zero(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return ret;
}
-uint32_t helper_ftrc_DT(float64 t0)
+uint32_t helper_ftrc_DT(CPUSH4State *env, float64 t0)
{
uint32_t ret;
set_float_exception_flags(0, &env->fp_status);
ret = float64_to_int32_round_to_zero(t0, &env->fp_status);
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
return ret;
}
-void helper_fipr(uint32_t m, uint32_t n)
+void helper_fipr(CPUSH4State *env, uint32_t m, uint32_t n)
{
int bank, i;
float32 r, p;
&env->fp_status);
r = float32_add(r, p, &env->fp_status);
}
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
env->fregs[bank + n + 3] = r;
}
-void helper_ftrv(uint32_t n)
+void helper_ftrv(CPUSH4State *env, uint32_t n)
{
int bank_matrix, bank_vector;
int i, j;
r[i] = float32_add(r[i], p, &env->fp_status);
}
}
- update_fpscr(GETPC());
+ update_fpscr(env, GETPC());
for (i = 0 ; i < 4 ; i++) {
env->fregs[bank_vector + i] = r[i];
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled)
- gen_helper_debug();
+ gen_helper_debug(cpu_env);
tcg_gen_exit_tb(0);
}
}
delayed jump as immediate jump are conditional jumps */
tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
if (ctx->singlestep_enabled)
- gen_helper_debug();
+ gen_helper_debug(cpu_env);
tcg_gen_exit_tb(0);
} else {
gen_goto_tb(ctx, 0, ctx->delayed_pc);
#define CHECK_NOT_DELAY_SLOT \
if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
{ \
- gen_helper_raise_slot_illegal_instruction(); \
+ gen_helper_raise_slot_illegal_instruction(cpu_env); \
ctx->bstate = BS_EXCP; \
return; \
}
#define CHECK_PRIVILEGED \
if (IS_USER(ctx)) { \
if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
- gen_helper_raise_slot_illegal_instruction(); \
+ gen_helper_raise_slot_illegal_instruction(cpu_env); \
} else { \
- gen_helper_raise_illegal_instruction(); \
+ gen_helper_raise_illegal_instruction(cpu_env); \
} \
ctx->bstate = BS_EXCP; \
return; \
#define CHECK_FPU_ENABLED \
if (ctx->flags & SR_FD) { \
if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
- gen_helper_raise_slot_fpu_disable(); \
+ gen_helper_raise_slot_fpu_disable(cpu_env); \
} else { \
- gen_helper_raise_fpu_disable(); \
+ gen_helper_raise_fpu_disable(cpu_env); \
} \
ctx->bstate = BS_EXCP; \
return; \
if (opcode != 0x0093 /* ocbi */
&& opcode != 0x00c3 /* movca.l */)
{
- gen_helper_discard_movcal_backup ();
+ gen_helper_discard_movcal_backup(cpu_env);
ctx->has_movcal = 0;
}
}
return;
case 0x0038: /* ldtlb */
CHECK_PRIVILEGED
- gen_helper_ldtlb();
+ gen_helper_ldtlb(cpu_env);
return;
case 0x002b: /* rte */
CHECK_PRIVILEGED
return;
case 0x001b: /* sleep */
CHECK_PRIVILEGED
- gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
+ gen_helper_sleep(cpu_env, tcg_const_i32(ctx->pc + 2));
return;
}
tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
return;
case 0x300e: /* addc Rm,Rn */
- gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
+ gen_helper_addc(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
return;
case 0x300f: /* addv Rm,Rn */
- gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
+ gen_helper_addv(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
return;
case 0x2009: /* and Rm,Rn */
tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
}
return;
case 0x3004: /* div1 Rm,Rn */
- gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
+ gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
return;
case 0x300d: /* dmuls.l Rm,Rn */
{
tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
arg1 = tcg_temp_new();
tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
- gen_helper_macl(arg0, arg1);
+ gen_helper_macl(cpu_env, arg0, arg1);
tcg_temp_free(arg1);
tcg_temp_free(arg0);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
arg1 = tcg_temp_new();
tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
- gen_helper_macw(arg0, arg1);
+ gen_helper_macw(cpu_env, arg0, arg1);
tcg_temp_free(arg1);
tcg_temp_free(arg0);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
return;
case 0x300a: /* subc Rm,Rn */
- gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
+ gen_helper_subc(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
return;
case 0x300b: /* subv Rm,Rn */
- gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
+ gen_helper_subv(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
return;
case 0x2008: /* tst Rm,Rn */
{
gen_load_fpr64(fp1, DREG(B7_4));
switch (ctx->opcode & 0xf00f) {
case 0xf000: /* fadd Rm,Rn */
- gen_helper_fadd_DT(fp0, fp0, fp1);
+ gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
break;
case 0xf001: /* fsub Rm,Rn */
- gen_helper_fsub_DT(fp0, fp0, fp1);
+ gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
break;
case 0xf002: /* fmul Rm,Rn */
- gen_helper_fmul_DT(fp0, fp0, fp1);
+ gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
break;
case 0xf003: /* fdiv Rm,Rn */
- gen_helper_fdiv_DT(fp0, fp0, fp1);
+ gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
break;
case 0xf004: /* fcmp/eq Rm,Rn */
- gen_helper_fcmp_eq_DT(fp0, fp1);
+ gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
return;
case 0xf005: /* fcmp/gt Rm,Rn */
- gen_helper_fcmp_gt_DT(fp0, fp1);
+ gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
return;
}
gen_store_fpr64(fp0, DREG(B11_8));
} else {
switch (ctx->opcode & 0xf00f) {
case 0xf000: /* fadd Rm,Rn */
- gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
break;
case 0xf001: /* fsub Rm,Rn */
- gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
break;
case 0xf002: /* fmul Rm,Rn */
- gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
break;
case 0xf003: /* fdiv Rm,Rn */
- gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
break;
case 0xf004: /* fcmp/eq Rm,Rn */
- gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
return;
case 0xf005: /* fcmp/gt Rm,Rn */
- gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
return;
}
}
if (ctx->fpscr & FPSCR_PR) {
break; /* illegal instruction */
} else {
- gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
- cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
+ gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
+ cpu_fregs[FREG(B11_8)]);
return;
}
}
TCGv imm;
CHECK_NOT_DELAY_SLOT
imm = tcg_const_i32(B7_0);
- gen_helper_trapa(imm);
+ gen_helper_trapa(cpu_env, imm);
tcg_temp_free(imm);
ctx->bstate = BS_BRANCH;
}
LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
case 0x406a: /* lds Rm,FPSCR */
CHECK_FPU_ENABLED
- gen_helper_ld_fpscr(REG(B11_8));
+ gen_helper_ld_fpscr(cpu_env, REG(B11_8));
ctx->bstate = BS_STOP;
return;
case 0x4066: /* lds.l @Rm+,FPSCR */
TCGv addr = tcg_temp_new();
tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
- gen_helper_ld_fpscr(addr);
+ gen_helper_ld_fpscr(cpu_env, addr);
tcg_temp_free(addr);
ctx->bstate = BS_STOP;
}
{
TCGv val = tcg_temp_new();
tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
- gen_helper_movcal (REG(B11_8), val);
+ gen_helper_movcal(cpu_env, REG(B11_8), val);
tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
}
ctx->has_movcal = 1;
break;
case 0x0093: /* ocbi @Rn */
{
- gen_helper_ocbi (REG(B11_8));
+ gen_helper_ocbi(cpu_env, REG(B11_8));
}
return;
case 0x00a3: /* ocbp @Rn */
if (ctx->opcode & 0x0100)
break; /* illegal instruction */
fp = tcg_temp_new_i64();
- gen_helper_float_DT(fp, cpu_fpul);
+ gen_helper_float_DT(fp, cpu_env, cpu_fpul);
gen_store_fpr64(fp, DREG(B11_8));
tcg_temp_free_i64(fp);
}
else {
- gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
+ gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
}
return;
case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
break; /* illegal instruction */
fp = tcg_temp_new_i64();
gen_load_fpr64(fp, DREG(B11_8));
- gen_helper_ftrc_DT(cpu_fpul, fp);
+ gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
tcg_temp_free_i64(fp);
}
else {
- gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
+ gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
}
return;
case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
break; /* illegal instruction */
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(fp, DREG(B11_8));
- gen_helper_fsqrt_DT(fp, fp);
+ gen_helper_fsqrt_DT(fp, cpu_env, fp);
gen_store_fpr64(fp, DREG(B11_8));
tcg_temp_free_i64(fp);
} else {
- gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
+ gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)]);
}
return;
case 0xf07d: /* fsrra FRn */
CHECK_FPU_ENABLED
{
TCGv_i64 fp = tcg_temp_new_i64();
- gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
+ gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
gen_store_fpr64(fp, DREG(B11_8));
tcg_temp_free_i64(fp);
}
{
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(fp, DREG(B11_8));
- gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
+ gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
tcg_temp_free_i64(fp);
}
return;
TCGv m, n;
m = tcg_const_i32((ctx->opcode >> 8) & 3);
n = tcg_const_i32((ctx->opcode >> 10) & 3);
- gen_helper_fipr(m, n);
+ gen_helper_fipr(cpu_env, m, n);
tcg_temp_free(m);
tcg_temp_free(n);
return;
(ctx->fpscr & FPSCR_PR) == 0) {
TCGv n;
n = tcg_const_i32((ctx->opcode >> 10) & 3);
- gen_helper_ftrv(n);
+ gen_helper_ftrv(cpu_env, n);
tcg_temp_free(n);
return;
}
fflush(stderr);
#endif
if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
- gen_helper_raise_slot_illegal_instruction();
+ gen_helper_raise_slot_illegal_instruction(cpu_env);
} else {
- gen_helper_raise_illegal_instruction();
+ gen_helper_raise_illegal_instruction(cpu_env);
}
ctx->bstate = BS_EXCP;
}
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
- gen_helper_debug();
+ gen_helper_debug(cpu_env);
ctx.bstate = BS_EXCP;
break;
}
fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
fflush(stderr);
#endif
- ctx.opcode = lduw_code(ctx.pc);
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
decode_opc(&ctx);
num_insns++;
ctx.pc += 2;
gen_io_end();
if (env->singlestep_enabled) {
tcg_gen_movi_i32(cpu_pc, ctx.pc);
- gen_helper_debug();
+ gen_helper_debug(cpu_env);
} else {
switch (ctx.bstate) {
case BS_STOP:
obj-$(TARGET_SPARC) += int32_helper.o
obj-$(TARGET_SPARC64) += int64_helper.o
obj-$(TARGET_SPARC64) += vis_helper.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
obj-y += ucf64_helper.o
obj-$(CONFIG_SOFTMMU) += machine.o softmmu.o
-
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
DEF_HELPER_1(clz, i32, i32)
DEF_HELPER_1(clo, i32, i32)
-DEF_HELPER_1(exception, void, i32)
+DEF_HELPER_2(exception, void, env, i32)
-DEF_HELPER_2(asr_write, void, i32, i32)
-DEF_HELPER_0(asr_read, i32)
+DEF_HELPER_3(asr_write, void, env, i32, i32)
+DEF_HELPER_1(asr_read, i32, env)
-DEF_HELPER_1(get_user_reg, i32, i32)
-DEF_HELPER_2(set_user_reg, void, i32, i32)
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
-DEF_HELPER_2(add_cc, i32, i32, i32)
-DEF_HELPER_2(adc_cc, i32, i32, i32)
-DEF_HELPER_2(sub_cc, i32, i32, i32)
-DEF_HELPER_2(sbc_cc, i32, i32, i32)
+DEF_HELPER_3(add_cc, i32, env, i32, i32)
+DEF_HELPER_3(adc_cc, i32, env, i32, i32)
+DEF_HELPER_3(sub_cc, i32, env, i32, i32)
+DEF_HELPER_3(sbc_cc, i32, env, i32, i32)
DEF_HELPER_2(shl, i32, i32, i32)
DEF_HELPER_2(shr, i32, i32, i32)
DEF_HELPER_2(sar, i32, i32, i32)
-DEF_HELPER_2(shl_cc, i32, i32, i32)
-DEF_HELPER_2(shr_cc, i32, i32, i32)
-DEF_HELPER_2(sar_cc, i32, i32, i32)
-DEF_HELPER_2(ror_cc, i32, i32, i32)
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
DEF_HELPER_1(ucf64_get_fpscr, i32, env)
DEF_HELPER_2(ucf64_set_fpscr, void, env, i32)
* later version. See the COPYING file in the top-level directory.
*/
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helper.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
-void HELPER(exception)(uint32_t excp)
+void HELPER(exception)(CPUUniCore32State *env, uint32_t excp)
{
env->exception_index = excp;
cpu_loop_exit(env);
}
-static target_ulong asr_read(void)
+static target_ulong asr_read(CPUUniCore32State *env)
{
int ZF;
ZF = (env->ZF == 0);
(env->CF << 29) | ((env->VF & 0x80000000) >> 3);
}
-target_ulong cpu_asr_read(CPUUniCore32State *env1)
+target_ulong cpu_asr_read(CPUUniCore32State *env)
{
- CPUUniCore32State *saved_env;
- target_ulong ret;
-
- saved_env = env;
- env = env1;
- ret = asr_read();
- env = saved_env;
- return ret;
+ return asr_read(env);
}
-target_ulong HELPER(asr_read)(void)
+target_ulong HELPER(asr_read)(CPUUniCore32State *env)
{
- return asr_read();
+ return asr_read(env);
}
-static void asr_write(target_ulong val, target_ulong mask)
+static void asr_write(CPUUniCore32State *env, target_ulong val,
+ target_ulong mask)
{
if (mask & ASR_NZCV) {
env->ZF = (~val) & ASR_Z;
env->uncached_asr = (env->uncached_asr & ~mask) | (val & mask);
}
-void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask)
+void cpu_asr_write(CPUUniCore32State *env, target_ulong val, target_ulong mask)
{
- CPUUniCore32State *saved_env;
-
- saved_env = env;
- env = env1;
- asr_write(val, mask);
- env = saved_env;
+ asr_write(env, val, mask);
}
-void HELPER(asr_write)(target_ulong val, target_ulong mask)
+void HELPER(asr_write)(CPUUniCore32State *env, target_ulong val,
+ target_ulong mask)
{
- asr_write(val, mask);
+ asr_write(env, val, mask);
}
/* Access to user mode registers from privileged modes. */
-uint32_t HELPER(get_user_reg)(uint32_t regno)
+uint32_t HELPER(get_user_reg)(CPUUniCore32State *env, uint32_t regno)
{
uint32_t val;
return val;
}
-void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
+void HELPER(set_user_reg)(CPUUniCore32State *env, uint32_t regno, uint32_t val)
{
if (regno == 29) {
env->banked_r29[0] = val;
The only way to do that in TCG is a conditional branch, which clobbers
all our temporaries. For now implement these as helper functions. */
-uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
{
uint32_t result;
result = a + b;
return result;
}
-uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(adc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
{
uint32_t result;
if (!env->CF) {
return result;
}
-uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
{
uint32_t result;
result = a - b;
return result;
}
-uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(sbc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
{
uint32_t result;
if (!env->CF) {
return (int32_t)x >> shift;
}
-uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(shl_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(shr_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(sar_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
{
int shift1, shift;
shift1 = i & 0xff;
#define SHIFT 3
#include "softmmu_template.h"
-void tlb_fill(CPUUniCore32State *env1, target_ulong addr, int is_write,
- int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUUniCore32State *env, target_ulong addr, int is_write,
+ int mmu_idx, uintptr_t retaddr)
{
TranslationBlock *tb;
- CPUUniCore32State *saved_env;
unsigned long pc;
int ret;
- saved_env = env;
- env = env1;
ret = uc32_cpu_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
}
cpu_loop_exit(env);
}
- env = saved_env;
}
#endif
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
TCGv tmp_mask = tcg_const_i32(mask);
- gen_helper_asr_write(var, tmp_mask);
+ gen_helper_asr_write(cpu_env, var, tmp_mask);
tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
{
TCGv tmp = new_tmp();
tcg_gen_movi_i32(tmp, excp);
- gen_helper_exception(tmp);
+ gen_helper_exception(cpu_env, tmp);
dead_tmp(tmp);
}
if (flags) {
switch (shiftop) {
case 0:
- gen_helper_shl_cc(var, var, shift);
+ gen_helper_shl_cc(var, cpu_env, var, shift);
break;
case 1:
- gen_helper_shr_cc(var, var, shift);
+ gen_helper_shr_cc(var, cpu_env, var, shift);
break;
case 2:
- gen_helper_sar_cc(var, var, shift);
+ gen_helper_sar_cc(var, cpu_env, var, shift);
break;
case 3:
- gen_helper_ror_cc(var, var, shift);
+ gen_helper_ror_cc(var, cpu_env, var, shift);
break;
}
} else {
if (IS_USER(s)) {
ILLEGAL;
}
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
gen_exception_return(s, tmp);
} else {
if (UCOP_SET_S) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
} else {
tcg_gen_sub_i32(tmp, tmp, tmp2);
}
break;
case 0x03:
if (UCOP_SET_S) {
- gen_helper_sub_cc(tmp, tmp2, tmp);
+ gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
} else {
tcg_gen_sub_i32(tmp, tmp2, tmp);
}
break;
case 0x04:
if (UCOP_SET_S) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
break;
case 0x05:
if (UCOP_SET_S) {
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_add_carry(tmp, tmp, tmp2);
}
break;
case 0x06:
if (UCOP_SET_S) {
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_sub_carry(tmp, tmp, tmp2);
}
break;
case 0x07:
if (UCOP_SET_S) {
- gen_helper_sbc_cc(tmp, tmp2, tmp);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
} else {
gen_sub_carry(tmp, tmp2, tmp);
}
break;
case 0x0a:
if (UCOP_SET_S) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
}
dead_tmp(tmp);
break;
case 0x0b:
if (UCOP_SET_S) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
}
dead_tmp(tmp);
break;
tmp = load_cpu_field(bsr);
} else {
tmp = new_tmp();
- gen_helper_asr_read(tmp);
+ gen_helper_asr_read(tmp, cpu_env);
}
store_reg(s, UCOP_REG_D, tmp);
return;
gen_bx(s, tmp);
} else if (user) {
tmp2 = tcg_const_i32(reg);
- gen_helper_set_user_reg(tmp2, tmp);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
tcg_temp_free_i32(tmp2);
dead_tmp(tmp);
} else if (reg == UCOP_REG_N) {
} else if (user) {
tmp = new_tmp();
tmp2 = tcg_const_i32(reg);
- gen_helper_get_user_reg(tmp, tmp2);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, reg);
{
unsigned int insn;
- insn = ldl_code(s->pc);
+ insn = cpu_ldl_code(env, s->pc);
s->pc += 4;
/* UniCore instructions class:
so don't use these. */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
-#if defined(CONFIG_TCG_PASS_AREG0) && (TARGET_LONG_BITS == 64)
+#if TARGET_LONG_BITS == 64
/* If we're passing env to the helper as r0 and need a regpair
* for the address then r2 will be overwritten as we're setting
* up the args to the helper.
use these. */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
-#if defined(CONFIG_SOFTMMU) && \
- defined(CONFIG_TCG_PASS_AREG0) && (TARGET_LONG_BITS == 64)
+#if defined(CONFIG_SOFTMMU) && (TARGET_LONG_BITS == 64)
/* Avoid clashes with registers being used for helper args */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
/* r2 is still needed to load data_reg, so don't use it. */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
-#if defined(CONFIG_TCG_PASS_AREG0) && (TARGET_LONG_BITS == 64)
+#if TARGET_LONG_BITS == 64
/* Avoid clashes with registers being used for helper args */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
/* Helper routines for marshalling helper function arguments into
* the correct registers and stack.
* trash by moving the earlier arguments into them.
*/
argreg = TCG_REG_R0;
-#ifdef CONFIG_TCG_PASS_AREG0
argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
-#endif
#if TARGET_LONG_BITS == 64
argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
#else
* trash by moving the earlier arguments into them.
*/
argreg = TCG_REG_R0;
-#ifdef CONFIG_TCG_PASS_AREG0
argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
-#endif
#if TARGET_LONG_BITS == 64
argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
#else
#define TCG_TARGET_HAS_GUEST_BASE
enum {
- /* Note: must be synced with dyngen-exec.h */
TCG_AREG0 = TCG_REG_R6,
};
#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
}
tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
tcg_out_call(s, qemu_ld_helpers[opc & 3]);
switch (opc) {
tcg_abort();
}
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
tcg_out_call(s, qemu_st_helpers[opc]);
/* label2: */
#define TCG_TARGET_HAS_GUEST_BASE
-/* Note: must be synced with dyngen-exec.h */
#define TCG_AREG0 TCG_REG_R17
tcg_regset_set32(ct->u.regs, 0, 0xffff);
tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]);
tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]);
-#endif
} else {
tcg_regset_set32(ct->u.regs, 0, 0xff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void *qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
/* Perform the TLB load and compare.
}
tcg_out_push(s, args[addrlo_idx]);
stack_adjust += 4;
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_push(s, TCG_AREG0);
stack_adjust += 4;
-#endif
#else
/* The first argument is already loaded with addrlo. */
arg_idx = 1;
tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx],
mem_index);
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
#endif
tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
}
tcg_out_push(s, args[addrlo_idx]);
stack_adjust += 4;
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_push(s, TCG_AREG0);
stack_adjust += 4;
-#endif
#else
tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
tcg_target_call_iarg_regs[1], data_reg);
tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
stack_adjust = 0;
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
#endif
tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
#define TCG_TARGET_HAS_GUEST_BASE
-/* Note: must be synced with dyngen-exec.h */
#if TCG_TARGET_REG_BITS == 64
# define TCG_AREG0 TCG_REG_R14
#else
TCG_REG_P7, TCG_REG_R3, TCG_REG_R57));
}
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_ldl_mmu,
helper_ldq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-#endif
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
}
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_bundle(s, mII,
tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
TCG_REG_R57, 0, TCG_REG_R56),
tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
TCG_REG_R56, 0, TCG_AREG0));
-#endif
if (!bswap || s_bits == 0) {
tcg_out_bundle(s, miB,
tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
}
}
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
data_reg = TCG_REG_R2;
}
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_bundle(s, mII,
tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R59,
TCG_REG_R56, 0, TCG_AREG0),
tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
TCG_REG_B0, TCG_REG_B6));
-#else
- tcg_out_bundle(s, miB,
- tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
- data_reg, TCG_REG_R3),
- tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
- mem_index, TCG_REG_R0),
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
-#endif
}
#else /* !CONFIG_SOFTMMU */
#define TCG_TARGET_HAS_not_i32 0 /* xor r1, -1, r3 */
#define TCG_TARGET_HAS_not_i64 0 /* xor r1, -1, r3 */
-/* Note: must be synced with dyngen-exec.h */
#define TCG_AREG0 TCG_REG_R7
/* Guest base is supported */
tcg_regset_set(ct->u.regs, 0xffffffff);
#if defined(CONFIG_SOFTMMU)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
-# if defined(CONFIG_TCG_PASS_AREG0) && (TARGET_LONG_BITS == 64)
+# if (TARGET_LONG_BITS == 64)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
# endif
#endif
tcg_regset_set(ct->u.regs, 0xffffffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
-# if (defined(CONFIG_TCG_PASS_AREG0) && TARGET_LONG_BITS == 32) || \
- (!defined(CONFIG_TCG_PASS_AREG0) && TARGET_LONG_BITS == 64)
+# if (TARGET_LONG_BITS == 32)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
# endif
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
-# if defined(CONFIG_TCG_PASS_AREG0) && TARGET_LONG_BITS == 64
+# if TARGET_LONG_BITS == 64
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
# endif
#endif
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
#endif
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
/* slow path */
arg_num = 0;
-# ifdef CONFIG_TCG_PASS_AREG0
tcg_out_call_iarg_reg32(s, &arg_num, TCG_AREG0);
-# endif
# if TARGET_LONG_BITS == 64
tcg_out_call_iarg_reg64(s, &arg_num, addr_regl, addr_regh);
# else
/* slow path */
arg_num = 0;
-# ifdef CONFIG_TCG_PASS_AREG0
tcg_out_call_iarg_reg32(s, &arg_num, TCG_AREG0);
-# endif
# if TARGET_LONG_BITS == 64
tcg_out_call_iarg_reg64(s, &arg_num, addr_regl, addr_regh);
# else
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
-/* Note: must be synced with dyngen-exec.h */
#define TCG_AREG0 TCG_REG_S0
/* guest base is supported */
tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#if TARGET_LONG_BITS == 64
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
#endif
-#endif
-#else /* !AREG0 */
-#if TARGET_LONG_BITS == 64
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-#endif
#endif
break;
case 'K': /* qemu_st[8..32] constraint */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#if TARGET_LONG_BITS == 64
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
#endif
-#endif
-#else /* !AREG0 */
-#if TARGET_LONG_BITS == 64
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
-#endif
#endif
break;
case 'M': /* qemu_st64 constraint */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
-#if defined(CONFIG_TCG_PASS_AREG0)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R9);
-#endif
#endif
break;
#else
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
#endif
static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
/* slow path */
ir = 3;
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0);
-#endif
#if TARGET_LONG_BITS == 32
tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#else
/* slow path */
ir = 3;
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0);
-#endif
#if TARGET_LONG_BITS == 32
tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#else
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
-#endif
#endif
break;
case 'S': /* qemu_st constraint */
#ifdef CONFIG_SOFTMMU
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R6);
-#endif
#endif
break;
case 'Z':
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
int addr_reg, int s_bits, int offset)
/* slow path */
ir = 3;
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_mov (s, TCG_TYPE_I64, ir++, TCG_AREG0);
-#endif
tcg_out_mov (s, TCG_TYPE_I64, ir++, addr_reg);
tcg_out_movi (s, TCG_TYPE_I64, ir++, mem_index);
/* slow path */
ir = 3;
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_mov (s, TCG_TYPE_I64, ir++, TCG_AREG0);
-#endif
tcg_out_mov (s, TCG_TYPE_I64, ir++, addr_reg);
tcg_out_rld (s, RLDICL, ir++, data_reg, 0, 64 - (1 << (3 + opc)));
tcg_out_movi (s, TCG_TYPE_I64, ir++, mem_index);
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
#endif
static uint8_t *tb_ret_addr;
tcg_abort();
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
} else {
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
tcg_target_call_iarg_regs[1]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
/* sign extension */
#define TCG_TARGET_EXTEND_ARGS 1
enum {
- /* Note: must be synced with dyngen-exec.h */
TCG_AREG0 = TCG_REG_R10,
};
};
#endif
-#ifdef CONFIG_TCG_PASS_AREG0
#define ARG_OFFSET 1
-#else
-#define ARG_OFFSET 0
-#endif
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_L0,
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_regset_reset_reg(ct->u.regs, TCG_REG_O3);
-#endif
break;
case 'I':
ct->ct |= TCG_CT_CONST_S11;
#include "../../softmmu_defs.h"
-#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
helper_stl_mmu,
helper_stq_mmu,
};
-#else
-/* legacy helper signature: __ld_mmu(target_ulong addr, int
- mmu_idx) */
-static const void * const qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
-};
-
-/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
- int mmu_idx) */
-static const void * const qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
-};
-#endif
#endif
#if TARGET_LONG_BITS == 32
/* mov */
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
/* XXX: move that code at the end of the TB */
/* qemu_ld_helper[s_bits](arg0, arg1) */
/* mov */
tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
-#ifdef CONFIG_TCG_PASS_AREG0
/* XXX/FIXME: suboptimal */
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
tcg_target_call_iarg_regs[2]);
tcg_target_call_iarg_regs[0]);
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
TCG_AREG0);
-#endif
/* XXX: move that code at the end of the TB */
/* qemu_st_helper[s_bits](arg0, arg1, arg2) */
tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[s_bits]
#define TCG_TARGET_HAS_deposit_i64 0
#endif
-/* Note: must be synced with dyngen-exec.h */
#ifdef CONFIG_SOLARIS
#define TCG_AREG0 TCG_REG_G2
#elif defined(__sparc_v9__)
case INDEX_op_qemu_st8:
case INDEX_op_qemu_st16:
case INDEX_op_qemu_st32:
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_r(s, TCG_AREG0);
-#endif
tcg_out_r(s, *args++);
tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#endif
break;
case INDEX_op_qemu_st64:
-#ifdef CONFIG_TCG_PASS_AREG0
tcg_out_r(s, TCG_AREG0);
-#endif
tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
tcg_out_r(s, *args++);
#endif
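With CONFIG_TCG_PASS_AREG0 gone, every backend's qemu_ld/qemu_st slow path now marshals env as the first helper argument unconditionally. For reference, the load/store helpers those calls target have the shape given by the recurring signature comments above; this is only a sketch of two of them, and the authoritative prototypes (including the exact CPU state typedef) are the ones declared in softmmu_defs.h:

/* load: CPU state, guest virtual address, MMU index */
uint32_t helper_ldl_mmu(CPUState *env, target_ulong addr, int mmu_idx);

/* store: CPU state, guest virtual address, value, MMU index */
void helper_stl_mmu(CPUState *env, target_ulong addr, uint32_t val, int mmu_idx);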
#include "qemu-common.h"
-#include "dyngen-exec.h" /* env */
#include "exec-all.h" /* MAX_OPC_PARAM_IARGS */
#include "tcg-op.h"
static tcg_target_ulong tci_reg[TCG_TARGET_NB_REGS];
-#if !defined(CONFIG_TCG_PASS_AREG0)
-# define helper_ldb_mmu(env, addr, mmu_idx) __ldb_mmu(addr, mmu_idx)
-# define helper_ldw_mmu(env, addr, mmu_idx) __ldw_mmu(addr, mmu_idx)
-# define helper_ldl_mmu(env, addr, mmu_idx) __ldl_mmu(addr, mmu_idx)
-# define helper_ldq_mmu(env, addr, mmu_idx) __ldq_mmu(addr, mmu_idx)
-# define helper_stb_mmu(env, addr, val, mmu_idx) __stb_mmu(addr, val, mmu_idx)
-# define helper_stw_mmu(env, addr, val, mmu_idx) __stw_mmu(addr, val, mmu_idx)
-# define helper_stl_mmu(env, addr, val, mmu_idx) __stl_mmu(addr, val, mmu_idx)
-# define helper_stq_mmu(env, addr, val, mmu_idx) __stq_mmu(addr, val, mmu_idx)
-#endif /* !CONFIG_TCG_PASS_AREG0 */
-
static tcg_target_ulong tci_read_reg(TCGReg index)
{
assert(index < ARRAY_SIZE(tci_reg));
qxl_interface_update_area_complete_overflow(int qid, int max) "%d max=%d"
qxl_interface_update_area_complete_schedule_bh(int qid, uint32_t num_dirty) "%d #dirty=%d"
qxl_io_destroy_primary_ignored(int qid, const char *mode) "%d %s"
+qxl_io_log(int qid, const uint8_t *log_buf) "%d %s"
qxl_io_read_unexpected(int qid) "%d"
-qxl_io_unexpected_vga_mode(int qid, uint32_t io_port, const char *desc) "%d 0x%x (%s)"
+qxl_io_unexpected_vga_mode(int qid, uint64_t addr, uint64_t val, const char *desc) "%d 0x%"PRIx64"=%"PRIu64" (%s)"
qxl_io_write(int qid, const char *mode, uint64_t addr, uint64_t val, unsigned size, int async) "%d %s addr=%"PRIu64 " val=%"PRIu64" size=%u async=%d"
qxl_memslot_add_guest(int qid, uint32_t slot_id, uint64_t guest_start, uint64_t guest_end) "%d %u: guest phys 0x%"PRIx64 " - 0x%" PRIx64
qxl_post_load(int qid, const char *mode) "%d %s"
qxl_spice_destroy_surface_wait_complete(int qid, uint32_t id) "%d sid=%d"
qxl_spice_destroy_surface_wait(int qid, uint32_t id, int async) "%d sid=%d async=%d"
qxl_spice_flush_surfaces_async(int qid, uint32_t surface_count, uint32_t num_free_res) "%d s#=%d, res#=%d"
-qxl_spice_monitors_config(int id) "%d"
+qxl_spice_monitors_config(int qid) "%d"
qxl_spice_loadvm_commands(int qid, void *ext, uint32_t count) "%d ext=%p count=%d"
qxl_spice_oom(int qid) "%d"
qxl_spice_reset_cursor(int qid) "%d"
qxl_spice_update_area(int qid, uint32_t surface_id, uint32_t left, uint32_t right, uint32_t top, uint32_t bottom) "%d sid=%d [%d,%d,%d,%d]"
qxl_spice_update_area_rest(int qid, uint32_t num_dirty_rects, uint32_t clear_dirty_region) "%d #d=%d clear=%d"
qxl_surfaces_dirty(int qid, int surface, int offset, int size) "%d surface=%d offset=%d size=%d"
+qxl_send_events(int qid, uint32_t events) "%d %d"
+qxl_set_guest_bug(int qid) "%d"
+qxl_interrupt_client_monitors_config(int qid, int num_heads, void *heads) "%d %d %p"
+qxl_client_monitors_config_unsupported_by_guest(int qid, uint32_t int_mask, void *client_monitors_config) "%d %X %p"
+qxl_client_monitors_config_capped(int qid, int requested, int limit) "%d %d %d"
+qxl_client_monitors_config_crc(int qid, unsigned size, uint32_t crc32) "%d %u %u"
# hw/qxl-render.c
qxl_render_blit_guest_primary_initialized(void) ""
#endif
}
-static SimpleSpiceUpdate *qemu_spice_create_update(SimpleSpiceDisplay *ssd)
+static void qemu_spice_create_one_update(SimpleSpiceDisplay *ssd,
+ QXLRect *rect)
{
SimpleSpiceUpdate *update;
QXLDrawable *drawable;
QXLImage *image;
QXLCommand *cmd;
- uint8_t *src, *dst;
- int by, bw, bh;
+ uint8_t *src, *mirror, *dst;
+ int by, bw, bh, offset, bytes;
struct timespec time_space;
- if (qemu_spice_rect_is_empty(&ssd->dirty)) {
- return NULL;
- };
-
trace_qemu_spice_create_update(
- ssd->dirty.left, ssd->dirty.right,
- ssd->dirty.top, ssd->dirty.bottom);
+ rect->left, rect->right,
+ rect->top, rect->bottom);
update = g_malloc0(sizeof(*update));
drawable = &update->drawable;
image = &update->image;
cmd = &update->ext.cmd;
- bw = ssd->dirty.right - ssd->dirty.left;
- bh = ssd->dirty.bottom - ssd->dirty.top;
+ bw = rect->right - rect->left;
+ bh = rect->bottom - rect->top;
update->bitmap = g_malloc(bw * bh * 4);
- drawable->bbox = ssd->dirty;
+ drawable->bbox = *rect;
drawable->clip.type = SPICE_CLIP_TYPE_NONE;
drawable->effect = QXL_EFFECT_OPAQUE;
drawable->release_info.id = (uintptr_t)update;
image->bitmap.palette = 0;
image->bitmap.format = SPICE_BITMAP_FMT_32BIT;
- if (ssd->conv == NULL) {
- PixelFormat dst = qemu_default_pixelformat(32);
- ssd->conv = qemu_pf_conv_get(&dst, &ssd->ds->surface->pf);
- assert(ssd->conv);
- }
-
- src = ds_get_data(ssd->ds) +
- ssd->dirty.top * ds_get_linesize(ssd->ds) +
- ssd->dirty.left * ds_get_bytes_per_pixel(ssd->ds);
+ offset =
+ rect->top * ds_get_linesize(ssd->ds) +
+ rect->left * ds_get_bytes_per_pixel(ssd->ds);
+ bytes = ds_get_bytes_per_pixel(ssd->ds) * bw;
+ src = ds_get_data(ssd->ds) + offset;
+ mirror = ssd->ds_mirror + offset;
dst = update->bitmap;
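+    /* Copy each scanned line into the mirror so the next refresh can skip
+     * rows that did not actually change, then convert it into the 32-bit
+     * bitmap handed to spice. */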
for (by = 0; by < bh; by++) {
- qemu_pf_conv_run(ssd->conv, dst, src, bw);
+ memcpy(mirror, src, bytes);
+ qemu_pf_conv_run(ssd->conv, dst, mirror, bw);
src += ds_get_linesize(ssd->ds);
+ mirror += ds_get_linesize(ssd->ds);
dst += image->bitmap.stride;
}
cmd->type = QXL_CMD_DRAW;
cmd->data = (uintptr_t)drawable;
+ QTAILQ_INSERT_TAIL(&ssd->updates, update, next);
+}
+
+static void qemu_spice_create_update(SimpleSpiceDisplay *ssd)
+{
+ static const int blksize = 32;
+ int blocks = (ds_get_width(ssd->ds) + blksize - 1) / blksize;
+ int dirty_top[blocks];
+ int y, yoff, x, xoff, blk, bw;
+ int bpp = ds_get_bytes_per_pixel(ssd->ds);
+ uint8_t *guest, *mirror;
+
+ if (qemu_spice_rect_is_empty(&ssd->dirty)) {
+ return;
+    }
+
+ if (ssd->conv == NULL) {
+ PixelFormat dst = qemu_default_pixelformat(32);
+ ssd->conv = qemu_pf_conv_get(&dst, &ssd->ds->surface->pf);
+ assert(ssd->conv);
+ }
+ if (ssd->ds_mirror == NULL) {
+ int size = ds_get_height(ssd->ds) * ds_get_linesize(ssd->ds);
+ ssd->ds_mirror = g_malloc0(size);
+ }
+
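+    /* dirty_top[blk] remembers the first changed row of the run currently
+     * open in each blksize-wide column; -1 means no run is open. */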
+ for (blk = 0; blk < blocks; blk++) {
+ dirty_top[blk] = -1;
+ }
+
+ guest = ds_get_data(ssd->ds);
+ mirror = ssd->ds_mirror;
+ for (y = ssd->dirty.top; y < ssd->dirty.bottom; y++) {
+ yoff = y * ds_get_linesize(ssd->ds);
+ for (x = ssd->dirty.left; x < ssd->dirty.right; x += blksize) {
+ xoff = x * bpp;
+ blk = x / blksize;
+ bw = MIN(blksize, ssd->dirty.right - x);
+ if (memcmp(guest + yoff + xoff,
+ mirror + yoff + xoff,
+ bw * bpp) == 0) {
+ if (dirty_top[blk] != -1) {
+ QXLRect update = {
+ .top = dirty_top[blk],
+ .bottom = y,
+ .left = x,
+ .right = x + bw,
+ };
+ qemu_spice_create_one_update(ssd, &update);
+ dirty_top[blk] = -1;
+ }
+ } else {
+ if (dirty_top[blk] == -1) {
+ dirty_top[blk] = y;
+ }
+ }
+ }
+ }
+
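+    /* Flush any runs still open once the bottom of the dirty region is
+     * reached. */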
+ for (x = ssd->dirty.left; x < ssd->dirty.right; x += blksize) {
+ blk = x / blksize;
+ bw = MIN(blksize, ssd->dirty.right - x);
+ if (dirty_top[blk] != -1) {
+ QXLRect update = {
+ .top = dirty_top[blk],
+ .bottom = ssd->dirty.bottom,
+ .left = x,
+ .right = x + bw,
+ };
+ qemu_spice_create_one_update(ssd, &update);
+ dirty_top[blk] = -1;
+ }
+ }
+
memset(&ssd->dirty, 0, sizeof(ssd->dirty));
- return update;
}
/*
- * Called from spice server thread context (via interface_release_ressource)
+ * Called from spice server thread context (via interface_release_resource)
* We do *not* hold the global qemu mutex here, so extra care is needed
* when calling qemu functions. QEMU interfaces used:
* - g_free (underlying glibc free is re-entrant).
{
ssd->ds = ds;
qemu_mutex_init(&ssd->lock);
+ QTAILQ_INIT(&ssd->updates);
ssd->mouse_x = -1;
ssd->mouse_y = -1;
if (ssd->num_surfaces == 0) {
void qemu_spice_display_resize(SimpleSpiceDisplay *ssd)
{
+ SimpleSpiceUpdate *update;
+
dprint(1, "%s:\n", __FUNCTION__);
memset(&ssd->dirty, 0, sizeof(ssd->dirty));
qemu_pf_conv_put(ssd->conv);
ssd->conv = NULL;
+ g_free(ssd->ds_mirror);
+ ssd->ds_mirror = NULL;
qemu_mutex_lock(&ssd->lock);
- if (ssd->update != NULL) {
- qemu_spice_destroy_update(ssd, ssd->update);
- ssd->update = NULL;
+ while ((update = QTAILQ_FIRST(&ssd->updates)) != NULL) {
+ QTAILQ_REMOVE(&ssd->updates, update, next);
+ qemu_spice_destroy_update(ssd, update);
}
qemu_mutex_unlock(&ssd->lock);
qemu_spice_destroy_host_primary(ssd);
vga_hw_update();
qemu_mutex_lock(&ssd->lock);
- if (ssd->update == NULL) {
- ssd->update = qemu_spice_create_update(ssd);
+ if (QTAILQ_EMPTY(&ssd->updates)) {
+ qemu_spice_create_update(ssd);
ssd->notify++;
}
qemu_spice_cursor_refresh_unlocked(ssd);
dprint(3, "%s:\n", __FUNCTION__);
qemu_mutex_lock(&ssd->lock);
- if (ssd->update != NULL) {
- update = ssd->update;
- ssd->update = NULL;
+ update = QTAILQ_FIRST(&ssd->updates);
+ if (update != NULL) {
+ QTAILQ_REMOVE(&ssd->updates, update, next);
*ext = update->ext;
ret = true;
}
struct SimpleSpiceDisplay {
DisplayState *ds;
+ uint8_t *ds_mirror;
void *buf;
int bufsize;
QXLWorker *worker;
* to them must be protected by the lock.
*/
QemuMutex lock;
- SimpleSpiceUpdate *update;
+ QTAILQ_HEAD(, SimpleSpiceUpdate) updates;
QEMUCursor *cursor;
int mouse_x, mouse_y;
};
QXLImage image;
QXLCommandExt ext;
uint8_t *bitmap;
+ QTAILQ_ENTRY(SimpleSpiceUpdate) next;
};
int qemu_spice_rect_is_empty(const QXLRect* r);
*/
#include "config.h"
#include "cpu.h"
-#ifndef CONFIG_TCG_PASS_AREG0
-#include "dyngen-exec.h"
-#endif
#include "disas.h"
#include "tcg.h"
struct sigcontext *uc = puc;
#endif
-#ifndef CONFIG_TCG_PASS_AREG0
- env = env1;
-
- /* XXX: restore cpu registers saved in host registers */
-#endif
-
if (puc) {
/* XXX: use siglongjmp ? */
#ifdef __linux__
TranslationBlock *tb;
int ret;
-#ifndef CONFIG_TCG_PASS_AREG0
- if (cpu_single_env) {
- env = cpu_single_env; /* XXX: find a correct solution for multithread */
- }
-#endif
#if defined(DEBUG_SIGNAL)
qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);