common-obj-$(CONFIG_SLIRP) += $(addprefix slirp/, $(slirp-obj-y))
# xen backend driver support
-common-obj-$(CONFIG_XEN) += xen_backend.o xen_devconfig.o
-common-obj-$(CONFIG_XEN) += xen_console.o xenfb.o xen_disk.o xen_nic.o
+common-obj-$(CONFIG_XEN_BACKEND) += xen_backend.o xen_devconfig.o
+common-obj-$(CONFIG_XEN_BACKEND) += xen_console.o xenfb.o xen_disk.o xen_nic.o
######################################################################
# libuser
hw-obj-y =
hw-obj-y += vl.o loader.o
-hw-obj-$(CONFIG_VIRTIO) += virtio.o virtio-console.o
+hw-obj-$(CONFIG_VIRTIO) += virtio-console.o
hw-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o
hw-obj-y += fw_cfg.o
hw-obj-$(CONFIG_PCI) += pci.o pci_bridge.o
GENERATED_HEADERS = config-target.h
CONFIG_NO_PCI = $(if $(subst n,,$(CONFIG_PCI)),n,y)
CONFIG_NO_KVM = $(if $(subst n,,$(CONFIG_KVM)),n,y)
+CONFIG_NO_XEN = $(if $(subst n,,$(CONFIG_XEN)),n,y)
include ../config-host.mak
include config-devices.mak
# HELPER_CFLAGS is used for all the code compiled with static register
# variables
-%_helper.o user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+op_helper.o user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
# Note: this is a workaround. The real fix is to avoid compiling
# cpu_signal_handler() in user-exec.c.
# virtio has to be here due to weird dependency between PCI and virtio-net.
# need to fix this properly
obj-$(CONFIG_NO_PCI) += pci-stub.o
-obj-$(CONFIG_VIRTIO) += virtio-blk.o virtio-balloon.o virtio-net.o virtio-serial-bus.o
+obj-$(CONFIG_VIRTIO) += virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-serial-bus.o
obj-y += vhost_net.o
obj-$(CONFIG_VHOST_NET) += vhost.o
obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/virtio-9p-device.o
QEMU_CFLAGS += $(VNC_JPEG_CFLAGS)
QEMU_CFLAGS += $(VNC_PNG_CFLAGS)
-# xen backend driver support
-obj-i386-$(CONFIG_XEN) += xen_machine_pv.o xen_domainbuild.o
-
-ifeq ($(TARGET_BASE_ARCH), i386)
- CONFIG_NO_XEN = $(if $(subst n,,$(CONFIG_XEN)),n,y)
-else
- CONFIG_NO_XEN = y
-endif
# xen support
-CONFIG_NO_XEN_MAPCACHE = $(if $(subst n,,$(CONFIG_XEN_MAPCACHE)),n,y)
-obj-i386-$(CONFIG_XEN) += xen-all.o
+obj-$(CONFIG_XEN) += xen-all.o xen_machine_pv.o xen_domainbuild.o xen-mapcache.o
obj-$(CONFIG_NO_XEN) += xen-stub.o
-obj-i386-$(CONFIG_XEN_MAPCACHE) += xen-mapcache.o
-obj-$(CONFIG_NO_XEN_MAPCACHE) += xen-mapcache-stub.o
obj-i386-$(CONFIG_XEN) += xen_platform.o
#else
#include "qemu-common.h"
#include "gdbstub.h"
+#include "hw/arm-misc.h"
#endif
#define SYS_OPEN 0x01
return syscall_err;
#endif
case SYS_GET_CMDLINE:
-#ifdef CONFIG_USER_ONLY
- /* Build a commandline from the original argv. */
{
- char *arm_cmdline_buffer;
- const char *host_cmdline_buffer;
+ /* Build a command-line from the original argv (user mode), or from
+ * the kernel filename and command line (system mode).
+ *
+ * The inputs are:
+ * * ARG(0), pointer to a buffer of at least the size
+ * specified in ARG(1).
+ * * ARG(1), size of the buffer pointed to by ARG(0) in
+ * bytes.
+ *
+ * The outputs are:
+ * * ARG(0), pointer to null-terminated string of the
+ * command line.
+ * * ARG(1), length of the string pointed to by ARG(0).
+ */
- unsigned int i;
- unsigned int arm_cmdline_len = ARG(1);
- unsigned int host_cmdline_len =
- ts->info->arg_end-ts->info->arg_start;
+ char *output_buffer;
+ size_t input_size = ARG(1);
+ size_t output_size;
+ int status = 0;
- if (!arm_cmdline_len || host_cmdline_len > arm_cmdline_len) {
- return -1; /* not enough space to store command line */
- }
+ /* Compute the size of the output string. */
+#if !defined(CONFIG_USER_ONLY)
+ output_size = strlen(ts->boot_info->kernel_filename)
+ + 1 /* Separating space. */
+ + strlen(ts->boot_info->kernel_cmdline)
+ + 1; /* Terminating null byte. */
+#else
+ unsigned int i;
- if (!host_cmdline_len) {
+ output_size = ts->info->arg_end - ts->info->arg_start;
+ if (!output_size) {
/* We special-case the "empty command line" case (argc==0).
Just provide the terminating 0. */
- arm_cmdline_buffer = lock_user(VERIFY_WRITE, ARG(0), 1, 0);
- arm_cmdline_buffer[0] = 0;
- unlock_user(arm_cmdline_buffer, ARG(0), 1);
+ output_size = 1;
+ }
+#endif
- /* Adjust the commandline length argument. */
- SET_ARG(1, 0);
- return 0;
+ if (output_size > input_size) {
+ /* Not enough space to store command-line arguments. */
+ return -1;
}
- /* lock the buffers on the ARM side */
- arm_cmdline_buffer =
- lock_user(VERIFY_WRITE, ARG(0), host_cmdline_len, 0);
- host_cmdline_buffer =
- lock_user(VERIFY_READ, ts->info->arg_start,
- host_cmdline_len, 1);
+ /* Adjust the command-line length. */
+ SET_ARG(1, output_size - 1);
- if (arm_cmdline_buffer && host_cmdline_buffer)
- {
- /* the last argument is zero-terminated;
- no need for additional termination */
- memcpy(arm_cmdline_buffer, host_cmdline_buffer,
- host_cmdline_len);
+ /* Lock the buffer on the ARM side. */
+ output_buffer = lock_user(VERIFY_WRITE, ARG(0), output_size, 0);
+ if (!output_buffer) {
+ return -1;
+ }
- /* separate arguments by white spaces */
- for (i = 0; i < host_cmdline_len-1; i++) {
- if (arm_cmdline_buffer[i] == 0) {
- arm_cmdline_buffer[i] = ' ';
- }
- }
+ /* Copy the command-line arguments. */
+#if !defined(CONFIG_USER_ONLY)
+ pstrcpy(output_buffer, output_size, ts->boot_info->kernel_filename);
+ pstrcat(output_buffer, output_size, " ");
+ pstrcat(output_buffer, output_size, ts->boot_info->kernel_cmdline);
+#else
+ if (output_size == 1) {
+ /* Empty command-line. */
+ output_buffer[0] = '\0';
+ goto out;
+ }
- /* Adjust the commandline length argument. */
- SET_ARG(1, host_cmdline_len-1);
+ if (copy_from_user(output_buffer, ts->info->arg_start,
+ output_size)) {
+ status = -1;
+ goto out;
}
- /* Unlock the buffers on the ARM side. */
- unlock_user(arm_cmdline_buffer, ARG(0), host_cmdline_len);
- unlock_user((void*)host_cmdline_buffer, ts->info->arg_start, 0);
+ /* Separate arguments by white spaces. */
+ for (i = 0; i < output_size - 1; i++) {
+ if (output_buffer[i] == 0) {
+ output_buffer[i] = ' ';
+ }
+ }
+ out:
+#endif
+ /* Unlock the buffer on the ARM side. */
+ unlock_user(output_buffer, ARG(0), output_size);
- /* Return success if we could return a commandline. */
- return (arm_cmdline_buffer && host_cmdline_buffer) ? 0 : -1;
+ return status;
}
-#else
- return -1;
-#endif
case SYS_HEAPINFO:
{
uint32_t *ptr;
echo "BLUEZ_CFLAGS=$bluez_cflags" >> $config_host_mak
fi
if test "$xen" = "yes" ; then
- echo "CONFIG_XEN=y" >> $config_host_mak
+ echo "CONFIG_XEN_BACKEND=y" >> $config_host_mak
echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version" >> $config_host_mak
fi
if test "$io_thread" = "yes" ; then
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
-void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size);
+void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
* RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_flush_coalesced_mmio_buffer(void);
uint32_t ldub_phys(target_phys_addr_t addr);
-uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
-uint32_t ldl_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
-uint64_t ldq_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
-void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
-void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
-void stw_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
-void stl_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
-void stq_phys(target_phys_addr_t addr, uint64_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);
+#ifdef NEED_CPU_H
+uint32_t lduw_phys(target_phys_addr_t addr);
+uint32_t ldl_phys(target_phys_addr_t addr);
+uint64_t ldq_phys(target_phys_addr_t addr);
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
+void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
+void stw_phys(target_phys_addr_t addr, uint32_t val);
+void stl_phys(target_phys_addr_t addr, uint32_t val);
+void stq_phys(target_phys_addr_t addr, uint64_t val);
+#endif
+
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
const uint8_t *buf, int len);
--- /dev/null
+###########################################################################
+#
+# You can pass this file directly to qemu using the -readconfig
+# command line switch.
+#
+# This config file creates an EHCI adapter with companion UHCI
+# controllers as a multifunction device in PCI slot "1d".
+#
+# Specify "bus=ehci.0" when creating usb devices to hook them up
+# there.
+#
+
+[device "ehci"]
+ driver = "ich9-usb-ehci1"
+ addr = "1d.7"
+ multifunction = "on"
+
+[device "uhci-1"]
+ driver = "ich9-usb-uhci1"
+ addr = "1d.0"
+ multifunction = "on"
+ masterbus = "ehci.0"
+ firstport = "0"
+
+[device "uhci-2"]
+ driver = "ich9-usb-uhci2"
+ addr = "1d.1"
+ multifunction = "on"
+ masterbus = "ehci.0"
+ firstport = "2"
+
+[device "uhci-3"]
+ driver = "ich9-usb-uhci3"
+ addr = "1d.2"
+ multifunction = "on"
+ masterbus = "ehci.0"
+ firstport = "4"
USB 2.0 Quick Start
===================
-The QEMU EHCI Adapter does *not* support companion controllers. That
-implies there are two completely separate USB busses: One USB 1.1 bus
-driven by the UHCI controller and one USB 2.0 bus driven by the EHCI
-controller. Devices must be attached to the correct controller
-manually.
+The QEMU EHCI Adapter can be used with and without companion
+controllers. See below for the companion controller mode.
+
+When not running in companion controller mode, there are two completely
+separate USB busses: One USB 1.1 bus driven by the UHCI controller and
+one USB 2.0 bus driven by the EHCI controller. Devices must be
+attached to the correct controller manually.
The '-usb' switch will make qemu create the UHCI controller as part of
the PIIX3 chipset. The USB 1.1 bus will carry the name "usb.0".
device to the EHCI adapter.
+Companion controller support
+----------------------------
+
+Companion controller support has been added recently. The operational
+model described above with two completely separate busses still works
+fine. Additionally, the UHCI and OHCI controllers can now attach to a
+usb bus created by EHCI as companion controllers. This is done by
+specifying the masterbus and firstport properties. masterbus specifies
+the name of the bus the controller should attach to. firstport
+specifies the first port the controller should attach to; this is
+needed because one ehci controller with six ports usually has three
+uhci companion controllers with two ports each.
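+
+As a minimal sketch, an ehci controller plus a single uhci companion
+can be created on the command line like this (the id is arbitrary; a
+controller with id=ehci creates a usb bus named "ehci.0"):
+
+ qemu -device ich9-usb-ehci1,id=ehci \
+      -device ich9-usb-uhci1,masterbus=ehci.0,firstport=0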
+
+There is a config file in docs which will do all this for you, just
+try ...
+
+ qemu -readconfig docs/ich9-ehci-uhci.cfg
+
+... then use "bus=ehci.0" to assign your usb devices to that bus.
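+
+For example (usb-tablet is just one possible device here):
+
+ qemu -readconfig docs/ich9-ehci-uhci.cfg -device usb-tablet,bus=ehci.0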
+
+
More USB tips & tricks
======================
pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
- do_unassigned_access(addr, 0, 1, 0, 4);
+ cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
abort();
}
#else
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
xen_ram_alloc(new_block->offset, size);
} else {
new_block->host = qemu_vmalloc(size);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
munmap(block->host, block->length);
#else
- if (xen_mapcache_enabled()) {
- qemu_invalidate_entry(block->host);
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(block->host);
} else {
qemu_vfree(block->host);
}
QLIST_REMOVE(block, next);
QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
}
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 0);
+ return xen_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = qemu_map_cache(block->offset, block->length, 1);
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 0);
+ return xen_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = qemu_map_cache(block->offset, block->length, 1);
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
* but takes a size argument */
-void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
+void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
- if (xen_mapcache_enabled())
- return qemu_map_cache(addr, *size, 1);
- else {
+ if (*size == 0) {
+ return NULL;
+ }
+ if (xen_enabled()) {
+ return xen_map_cache(addr, *size, 1);
+ } else {
RAMBlock *block;
QLIST_FOREACH(block, &ram_list.blocks, next) {
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
abort();
-
- *size = 0;
- return NULL;
}
}
RAMBlock *block;
uint8_t *host = ptr;
- if (xen_mapcache_enabled()) {
- *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+ if (xen_enabled()) {
+ *ram_addr = xen_ram_addr_from_mapcache(ptr);
return 0;
}
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 1);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
return 0;
}
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 2);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
return 0;
}
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 4);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
return 0;
}
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 1);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 2);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 4);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}
target_phys_addr_t page;
unsigned long pd;
PhysPageDesc *p;
- target_phys_addr_t addr1 = addr;
+ ram_addr_t raddr = ULONG_MAX;
+ ram_addr_t rlen;
+ void *ret;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
*plen = l;
return bounce.buffer;
}
+ if (!todo) {
+ raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ }
len -= l;
addr += l;
todo += l;
}
- *plen = todo;
- return qemu_ram_ptr_length(addr1, plen);
+ rlen = todo;
+ ret = qemu_ram_ptr_length(raddr, &rlen);
+ *plen = rlen;
+ return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
access_len -= l;
}
}
- if (xen_mapcache_enabled()) {
- qemu_invalidate_entry(buffer);
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(buffer);
}
return;
}
* THE SOFTWARE.
*/
-#include "hw.h"
-#include "mips.h"
-#include "nvram.h"
+#include "sysbus.h"
+#include "trace.h"
-//#define DEBUG_NVRAM
-
-typedef struct ds1225y_t
-{
+typedef struct {
+ DeviceState qdev;
uint32_t chip_size;
+ char *filename;
QEMUFile *file;
uint8_t *contents;
- uint8_t protection;
-} ds1225y_t;
-
+} NvRamState;
static uint32_t nvram_readb (void *opaque, target_phys_addr_t addr)
{
- ds1225y_t *s = opaque;
+ NvRamState *s = opaque;
uint32_t val;
val = s->contents[addr];
-
-#ifdef DEBUG_NVRAM
- printf("nvram: read 0x%x at " TARGET_FMT_lx "\n", val, addr);
-#endif
+ trace_nvram_read(addr, val);
return val;
}
static void nvram_writeb (void *opaque, target_phys_addr_t addr, uint32_t val)
{
- ds1225y_t *s = opaque;
+ NvRamState *s = opaque;
-#ifdef DEBUG_NVRAM
- printf("nvram: write 0x%x at " TARGET_FMT_lx "\n", val, addr);
-#endif
+ val &= 0xff;
+ trace_nvram_write(addr, s->contents[addr], val);
- s->contents[addr] = val & 0xff;
+ s->contents[addr] = val;
if (s->file) {
qemu_fseek(s->file, addr, SEEK_SET);
qemu_put_byte(s->file, (int)val);
nvram_writeb(opaque, addr + 3, (val >> 24) & 0xff);
}
-static void nvram_writeb_protected (void *opaque, target_phys_addr_t addr, uint32_t val)
-{
- ds1225y_t *s = opaque;
-
- if (s->protection != 7) {
-#ifdef DEBUG_NVRAM
- printf("nvram: prevent write of 0x%x at " TARGET_FMT_lx "\n", val, addr);
-#endif
- return;
- }
-
- nvram_writeb(opaque, addr, val);
-}
-
-static void nvram_writew_protected (void *opaque, target_phys_addr_t addr, uint32_t val)
-{
- nvram_writeb_protected(opaque, addr, val & 0xff);
- nvram_writeb_protected(opaque, addr + 1, (val >> 8) & 0xff);
-}
-
-static void nvram_writel_protected (void *opaque, target_phys_addr_t addr, uint32_t val)
-{
- nvram_writeb_protected(opaque, addr, val & 0xff);
- nvram_writeb_protected(opaque, addr + 1, (val >> 8) & 0xff);
- nvram_writeb_protected(opaque, addr + 2, (val >> 16) & 0xff);
- nvram_writeb_protected(opaque, addr + 3, (val >> 24) & 0xff);
-}
-
static CPUReadMemoryFunc * const nvram_read[] = {
&nvram_readb,
&nvram_readw,
&nvram_writel,
};
-static CPUWriteMemoryFunc * const nvram_write_protected[] = {
- &nvram_writeb_protected,
- &nvram_writew_protected,
- &nvram_writel_protected,
+static int nvram_post_load(void *opaque, int version_id)
+{
+ NvRamState *s = opaque;
+
+ /* Close file, as filename may have changed in load/store process */
+ if (s->file) {
+ qemu_fclose(s->file);
+ }
+
+ /* Write back nvram contents */
+ s->file = qemu_fopen(s->filename, "wb");
+ if (s->file) {
+ /* Write back contents, as 'wb' mode truncated the file */
+ qemu_put_buffer(s->file, s->contents, s->chip_size);
+ qemu_fflush(s->file);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_nvram = {
+ .name = "nvram",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .post_load = nvram_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(contents, NvRamState, chip_size, 0,
+ vmstate_info_uint8, uint8_t),
+ VMSTATE_END_OF_LIST()
+ }
};
-/* Initialisation routine */
-void *ds1225y_init(target_phys_addr_t mem_base, const char *filename)
+typedef struct {
+ SysBusDevice busdev;
+ NvRamState nvram;
+} SysBusNvRamState;
+
+static int nvram_sysbus_initfn(SysBusDevice *dev)
{
- ds1225y_t *s;
- int mem_indexRW, mem_indexRP;
+ NvRamState *s = &FROM_SYSBUS(SysBusNvRamState, dev)->nvram;
QEMUFile *file;
+ int s_io;
- s = qemu_mallocz(sizeof(ds1225y_t));
- s->chip_size = 0x2000; /* Fixed for ds1225y chip: 8 KiB */
s->contents = qemu_mallocz(s->chip_size);
- s->protection = 7;
+
+ s_io = cpu_register_io_memory(nvram_read, nvram_write, s,
+ DEVICE_NATIVE_ENDIAN);
+ sysbus_init_mmio(dev, s->chip_size, s_io);
/* Read current file */
- file = qemu_fopen(filename, "rb");
+ file = qemu_fopen(s->filename, "rb");
if (file) {
/* Read nvram contents */
qemu_get_buffer(file, s->contents, s->chip_size);
qemu_fclose(file);
}
- s->file = qemu_fopen(filename, "wb");
- if (s->file) {
- /* Write back contents, as 'wb' mode cleaned the file */
- qemu_put_buffer(s->file, s->contents, s->chip_size);
- qemu_fflush(s->file);
- }
+ nvram_post_load(s, 0);
+
+ return 0;
+}
+
+static SysBusDeviceInfo nvram_sysbus_info = {
+ .qdev.name = "ds1225y",
+ .qdev.size = sizeof(SysBusNvRamState),
+ .qdev.vmsd = &vmstate_nvram,
+ .init = nvram_sysbus_initfn,
+ .qdev.props = (Property[]) {
+ DEFINE_PROP_UINT32("size", SysBusNvRamState, nvram.chip_size, 0x2000),
+ DEFINE_PROP_STRING("filename", SysBusNvRamState, nvram.filename),
+ DEFINE_PROP_END_OF_LIST(),
+ },
+};
- /* Read/write memory */
- mem_indexRW = cpu_register_io_memory(nvram_read, nvram_write, s,
- DEVICE_NATIVE_ENDIAN);
- cpu_register_physical_memory(mem_base, s->chip_size, mem_indexRW);
- /* Read/write protected memory */
- mem_indexRP = cpu_register_io_memory(nvram_read, nvram_write_protected, s,
- DEVICE_NATIVE_ENDIAN);
- cpu_register_physical_memory(mem_base + s->chip_size, s->chip_size, mem_indexRP);
- return s;
+static void nvram_register(void)
+{
+ sysbus_register_withprop(&nvram_sysbus_info);
}
+
+device_init(nvram_register)
{
}
-static void softusb_device_destroy(USBBus *bus, USBDevice *dev)
+static void softusb_detach(USBPort *port)
+{
+}
+
+static void softusb_child_detach(USBPort *port, USBDevice *child)
{
}
static USBPortOps softusb_ops = {
.attach = softusb_attach,
+ .detach = softusb_detach,
+ .child_detach = softusb_child_detach,
};
static USBBusOps softusb_bus_ops = {
- .device_destroy = softusb_device_destroy,
};
static void milkymist_softusb_reset(DeviceState *d)
/* bonito.c */
PCIBus *bonito_init(qemu_irq *pic);
-/* ds1225y.c */
-void *ds1225y_init(target_phys_addr_t mem_base, const char *filename);
-void ds1225y_set_protection(void *opaque, int protection);
-
/* g364fb.c */
int g364fb_mm_init(target_phys_addr_t vram_base,
target_phys_addr_t ctrl_base, int it_shift,
#include "loader.h"
#include "mc146818rtc.h"
#include "blockdev.h"
+#include "sysbus.h"
enum jazz_model_e
{
void* rc4030_opaque;
int s_rtc, s_dma_dummy;
NICInfo *nd;
+ DeviceState *dev;
+ SysBusDevice *sysbus;
ISADevice *pit;
DriveInfo *fds[MAX_FD];
qemu_irq esp_reset, dma_enable;
/* FIXME: missing Jazz sound at 0x8000c000, rc4030[2] */
audio_init(i8259, NULL);
- /* NVRAM: Unprotected at 0x9000, Protected at 0xa000, Read only at 0xb000 */
- ds1225y_init(0x80009000, "nvram");
+ /* NVRAM */
+ dev = qdev_create(NULL, "ds1225y");
+ qdev_init_nofail(dev);
+ sysbus = sysbus_from_qdev(dev);
+ sysbus_mmio_map(sysbus, 0, 0x80009000);
/* LED indicator */
jazz_led_init(0x8000f000);
#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
-
-#define PCI_VENDOR_ID_XENSOURCE 0x5853
+#define PCI_DEVICE_ID_INTEL_82801I_UHCI1 0x2934
+#define PCI_DEVICE_ID_INTEL_82801I_UHCI2 0x2935
+#define PCI_DEVICE_ID_INTEL_82801I_UHCI3 0x2936
+#define PCI_DEVICE_ID_INTEL_82801I_UHCI4 0x2937
+#define PCI_DEVICE_ID_INTEL_82801I_UHCI5 0x2938
+#define PCI_DEVICE_ID_INTEL_82801I_UHCI6 0x2939
+#define PCI_DEVICE_ID_INTEL_82801I_EHCI1 0x293a
+#define PCI_DEVICE_ID_INTEL_82801I_EHCI2 0x293c
+
+#define PCI_VENDOR_ID_XEN 0x5853
+#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001
.no_hotplug = 1,
.init = piix3_initfn,
.config_write = piix3_write_config_xen,
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_82371SB_0, // 82371SB PIIX3 PCI-to-ISA bridge (Step A1)
+ .class_id = PCI_CLASS_BRIDGE_ISA,
},{
/* end of list */
}
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu-timer.h"
#include "qxl.h"
static const char *qxl_type[] = {
if (!qxl->cmdlog) {
return;
}
- fprintf(stderr, "qxl-%d/%s:", qxl->id, ring);
+ fprintf(stderr, "%ld qxl-%d/%s:", qemu_get_clock_ns(vm_clock),
+ qxl->id, ring);
fprintf(stderr, " cmd @ 0x%" PRIx64 " %s%s", ext->cmd.data,
qxl_name(qxl_type, ext->cmd.type),
compat ? "(compat)" : "");
info->n_surfaces = NUM_SURFACES;
}
+static const char *qxl_mode_to_string(int mode)
+{
+ switch (mode) {
+ case QXL_MODE_COMPAT:
+ return "compat";
+ case QXL_MODE_NATIVE:
+ return "native";
+ case QXL_MODE_UNDEFINED:
+ return "undefined";
+ case QXL_MODE_VGA:
+ return "vga";
+ }
+ return "INVALID";
+}
+
/* called from spice server thread context only */
static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext)
{
}
qemu_mutex_unlock(&qxl->ssd.lock);
if (ret) {
+ dprint(qxl, 2, "%s %s\n", __FUNCTION__, qxl_mode_to_string(qxl->mode));
qxl_log_command(qxl, "vga", ext);
}
return ret;
case QXL_MODE_COMPAT:
case QXL_MODE_NATIVE:
case QXL_MODE_UNDEFINED:
- dprint(qxl, 2, "%s: %s\n", __FUNCTION__,
- qxl->cmdflags ? "compat" : "native");
+ dprint(qxl, 4, "%s: %s\n", __FUNCTION__, qxl_mode_to_string(qxl->mode));
ring = &qxl->ram->cmd_ring;
if (SPICE_RING_IS_EMPTY(ring)) {
return false;
}
+ dprint(qxl, 2, "%s: %s\n", __FUNCTION__, qxl_mode_to_string(qxl->mode));
SPICE_RING_CONS_ITEM(ring, cmd);
ext->cmd = *cmd;
ext->group_id = MEMSLOT_GROUP_GUEST;
case QXL_IO_MEMSLOT_ADD:
case QXL_IO_MEMSLOT_DEL:
case QXL_IO_CREATE_PRIMARY:
+ case QXL_IO_UPDATE_IRQ:
+ case QXL_IO_LOG:
break;
default:
if (d->mode == QXL_MODE_NATIVE || d->mode == QXL_MODE_COMPAT)
break;
case QXL_IO_LOG:
if (d->guestdebug) {
- fprintf(stderr, "qxl/guest: %s", d->ram->log_buf);
+ fprintf(stderr, "qxl/guest-%d: %ld: %s", d->id,
+ qemu_get_clock_ns(vm_clock), d->ram->log_buf);
}
break;
case QXL_IO_RESET:
break;
case QXL_IO_DESTROY_PRIMARY:
PANIC_ON(val != 0);
- dprint(d, 1, "QXL_IO_DESTROY_PRIMARY\n");
+ dprint(d, 1, "QXL_IO_DESTROY_PRIMARY (%s)\n", qxl_mode_to_string(d->mode));
qxl_destroy_primary(d);
break;
case QXL_IO_DESTROY_SURFACE_WAIT:
qemu_spice_vm_change_state_handler(&qxl->ssd, running, reason);
if (!running && qxl->mode == QXL_MODE_NATIVE) {
- /* dirty all vram (which holds surfaces) to make sure it is saved */
+ /* dirty all vram (which holds surfaces) and devram (primary surface)
+ * to make sure they are saved */
/* FIXME #1: should go out during "live" stage */
/* FIXME #2: we only need to save the areas which are actually used */
- ram_addr_t addr = qxl->vram_offset;
- qxl_set_dirty(addr, addr + qxl->vram_size);
+ ram_addr_t vram_addr = qxl->vram_offset;
+ ram_addr_t surface0_addr = qxl->vga.vram_offset + qxl->shadow_rom.draw_area_offset;
+ qxl_set_dirty(vram_addr, vram_addr + qxl->vram_size);
+ qxl_set_dirty(surface0_addr, surface0_addr + qxl->shadow_rom.surface0_area_size);
}
}
static int qxl_init_common(PCIQXLDevice *qxl)
{
uint8_t* config = qxl->pci.config;
- uint32_t pci_device_id;
uint32_t pci_device_rev;
uint32_t io_size;
switch (qxl->revision) {
case 1: /* spice 0.4 -- qxl-1 */
- pci_device_id = QXL_DEVICE_ID_STABLE;
pci_device_rev = QXL_REVISION_STABLE_V04;
break;
case 2: /* spice 0.6 -- qxl-2 */
- pci_device_id = QXL_DEVICE_ID_STABLE;
+ default:
pci_device_rev = QXL_REVISION_STABLE_V06;
break;
- default: /* experimental */
- pci_device_id = QXL_DEVICE_ID_DEVEL;
- pci_device_rev = 1;
- break;
}
- pci_config_set_device_id(config, pci_device_id);
pci_set_byte(&config[PCI_REVISION_ID], pci_device_rev);
pci_set_byte(&config[PCI_INTERRUPT_PIN], 1);
d->modes = (QXLModes*)((uint8_t*)d->rom + d->rom->modes_offset);
- dprint(d, 1, "%s: restore mode\n", __FUNCTION__);
+ dprint(d, 1, "%s: restore mode (%s)\n", __FUNCTION__,
+ qxl_mode_to_string(d->mode));
newmode = d->mode;
d->mode = QXL_MODE_UNDEFINED;
switch (newmode) {
.config_write = qxl_write_config,
.romfile = "vgabios-qxl.bin",
.vendor_id = REDHAT_PCI_VENDOR_ID,
+ .device_id = QXL_DEVICE_ID_STABLE,
.class_id = PCI_CLASS_DISPLAY_VGA,
.qdev.props = (Property[]) {
DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, 64 * 1024 * 1024),
.qdev.vmsd = &qxl_vmstate,
.init = qxl_init_secondary,
.vendor_id = REDHAT_PCI_VENDOR_ID,
+ .device_id = QXL_DEVICE_ID_STABLE,
.class_id = PCI_CLASS_DISPLAY_OTHER,
.qdev.props = (Property[]) {
DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, 64 * 1024 * 1024),
.eps = (USBDescEndpoint[]) {
{
.bEndpointAddress = USB_DIR_OUT | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0,
.bInterval = 0x01,
},
{
.bEndpointAddress = USB_DIR_IN | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0,
.bInterval = 0x01,
},
.eps = (USBDescEndpoint[]) {
{
.bEndpointAddress = USB_DIR_OUT | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x09,
.bInterval = 0x01,
},
{
.bEndpointAddress = USB_DIR_IN | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x09,
.bInterval = 0x01,
},
.eps = (USBDescEndpoint[]) {
{
.bEndpointAddress = USB_DIR_OUT | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x11,
.bInterval = 0x01,
},
{
.bEndpointAddress = USB_DIR_IN | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x11,
.bInterval = 0x01,
},
.eps = (USBDescEndpoint[]) {
{
.bEndpointAddress = USB_DIR_OUT | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x19,
.bInterval = 0x01,
},
{
.bEndpointAddress = USB_DIR_IN | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x19,
.bInterval = 0x01,
},
.eps = (USBDescEndpoint[]) {
{
.bEndpointAddress = USB_DIR_OUT | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x21,
.bInterval = 0x01,
},
{
.bEndpointAddress = USB_DIR_IN | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x21,
.bInterval = 0x01,
},
.eps = (USBDescEndpoint[]) {
{
.bEndpointAddress = USB_DIR_OUT | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x31,
.bInterval = 0x01,
},
{
.bEndpointAddress = USB_DIR_IN | USB_SCO_EP,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = 0x31,
.bInterval = 0x01,
},
static int usb_qdev_exit(DeviceState *qdev)
{
USBDevice *dev = DO_UPCAST(USBDevice, qdev, qdev);
- USBBus *bus = usb_bus_from_device(dev);
if (dev->attached) {
usb_device_detach(dev);
}
- bus->ops->device_destroy(bus, dev);
if (dev->info->handle_destroy) {
dev->info->handle_destroy(dev);
}
return dev;
}
-void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index,
- USBPortOps *ops, int speedmask)
+static void usb_fill_port(USBPort *port, void *opaque, int index,
+ USBPortOps *ops, int speedmask)
{
- port->opaque = opaque;
- port->index = index;
port->opaque = opaque;
port->index = index;
port->ops = ops;
port->speedmask = speedmask;
+ usb_port_location(port, NULL, index + 1);
+}
+
+void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index,
+ USBPortOps *ops, int speedmask)
+{
+ usb_fill_port(port, opaque, index, ops, speedmask);
QTAILQ_INSERT_TAIL(&bus->free, port, next);
bus->nfree++;
}
+int usb_register_companion(const char *masterbus, USBPort *ports[],
+ uint32_t portcount, uint32_t firstport,
+ void *opaque, USBPortOps *ops, int speedmask)
+{
+ USBBus *bus;
+ int i;
+
+ QTAILQ_FOREACH(bus, &busses, next) {
+ if (strcmp(bus->qbus.name, masterbus) == 0) {
+ break;
+ }
+ }
+
+ if (!bus || !bus->ops->register_companion) {
+ qerror_report(QERR_INVALID_PARAMETER_VALUE, "masterbus",
+ "an USB masterbus");
+ if (bus) {
+ error_printf_unless_qmp(
+ "USB bus '%s' does not allow companion controllers\n",
+ masterbus);
+ }
+ return -1;
+ }
+
+ for (i = 0; i < portcount; i++) {
+ usb_fill_port(ports[i], opaque, i, ops, speedmask);
+ }
+
+ return bus->ops->register_companion(bus, ports, portcount, firstport);
+}
+
void usb_port_location(USBPort *downstream, USBPort *upstream, int portnr)
{
if (upstream) {
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * TODO:
- * o Downstream port handoff
*/
#include "hw.h"
#define PORTSC_BEGIN PORTSC
#define PORTSC_END (PORTSC + 4 * NB_PORTS)
/*
- * Bits that are reserverd or are read-only are masked out of values
+ * Bits that are reserved or are read-only are masked out of values
* written to us by software
*/
-#define PORTSC_RO_MASK 0x007021c5
+#define PORTSC_RO_MASK 0x007001c0
#define PORTSC_RWC_MASK 0x0000002a
#define PORTSC_WKOC_E (1 << 22) // Wake on Over Current Enable
#define PORTSC_WKDS_E (1 << 21) // Wake on Disconnect Enable
#define FRAME_TIMER_NS (1000000000 / FRAME_TIMER_FREQ)
#define NB_MAXINTRATE 8 // Max rate at which controller issues ints
-#define NB_PORTS 4 // Number of downstream ports
+#define NB_PORTS 6 // Number of downstream ports
#define BUFF_SIZE 5*4096 // Max bytes to transfer per transaction
#define MAX_ITERATIONS 20 // Max number of QH before we break the loop
#define MAX_QH 100 // Max allowable queue heads in a chain
qemu_irq irq;
target_phys_addr_t mem_base;
int mem;
- int num_ports;
+ int companion_count;
/* properties */
uint32_t freq;
int astate; // Current state in asynchronous schedule
int pstate; // Current state in periodic schedule
USBPort ports[NB_PORTS];
+ USBPort *companion_ports[NB_PORTS];
uint32_t usbsts_pending;
QTAILQ_HEAD(, EHCIQueue) queues;
trace_usb_ehci_port_attach(port->index, port->dev->product_desc);
+ if (*portsc & PORTSC_POWNER) {
+ USBPort *companion = s->companion_ports[port->index];
+ companion->dev = port->dev;
+ companion->ops->attach(companion);
+ return;
+ }
+
*portsc |= PORTSC_CONNECT;
*portsc |= PORTSC_CSC;
- /*
- * If a high speed device is attached then we own this port(indicated
- * by zero in the PORTSC_POWNER bit field) so set the status bit
- * and set an interrupt if enabled.
- */
- if ( !(*portsc & PORTSC_POWNER)) {
- ehci_set_interrupt(s, USBSTS_PCD);
- }
+ ehci_set_interrupt(s, USBSTS_PCD);
}
static void ehci_detach(USBPort *port)
trace_usb_ehci_port_detach(port->index);
- *portsc &= ~PORTSC_CONNECT;
+ if (*portsc & PORTSC_POWNER) {
+ USBPort *companion = s->companion_ports[port->index];
+ companion->ops->detach(companion);
+ companion->dev = NULL;
+ return;
+ }
+
+ ehci_queues_rip_device(s, port->dev);
+
+ *portsc &= ~(PORTSC_CONNECT|PORTSC_PED);
*portsc |= PORTSC_CSC;
- /*
- * If a high speed device is attached then we own this port(indicated
- * by zero in the PORTSC_POWNER bit field) so set the status bit
- * and set an interrupt if enabled.
- */
- if ( !(*portsc & PORTSC_POWNER)) {
- ehci_set_interrupt(s, USBSTS_PCD);
+ ehci_set_interrupt(s, USBSTS_PCD);
+}
+
+static void ehci_child_detach(USBPort *port, USBDevice *child)
+{
+ EHCIState *s = port->opaque;
+ uint32_t portsc = s->portsc[port->index];
+
+ if (portsc & PORTSC_POWNER) {
+ USBPort *companion = s->companion_ports[port->index];
+ companion->ops->child_detach(companion, child);
+ companion->dev = NULL;
+ return;
}
+
+ ehci_queues_rip_device(s, child);
+}
+
+static void ehci_wakeup(USBPort *port)
+{
+ EHCIState *s = port->opaque;
+ uint32_t portsc = s->portsc[port->index];
+
+ if (portsc & PORTSC_POWNER) {
+ USBPort *companion = s->companion_ports[port->index];
+ if (companion->ops->wakeup) {
+ companion->ops->wakeup(companion);
+ }
+ }
+}
+
+static int ehci_register_companion(USBBus *bus, USBPort *ports[],
+ uint32_t portcount, uint32_t firstport)
+{
+ EHCIState *s = container_of(bus, EHCIState, bus);
+ uint32_t i;
+
+ if (firstport + portcount > NB_PORTS) {
+ qerror_report(QERR_INVALID_PARAMETER_VALUE, "firstport",
+ "firstport on masterbus");
+ error_printf_unless_qmp(
+ "firstport value of %u makes companion take ports %u - %u, which "
+ "is outside of the valid range of 0 - %u\n", firstport, firstport,
+ firstport + portcount - 1, NB_PORTS - 1);
+ return -1;
+ }
+
+ for (i = 0; i < portcount; i++) {
+ if (s->companion_ports[firstport + i]) {
+ qerror_report(QERR_INVALID_PARAMETER_VALUE, "masterbus",
+ "an USB masterbus");
+ error_printf_unless_qmp(
+ "port %u on masterbus %s already has a companion assigned\n",
+ firstport + i, bus->qbus.name);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < portcount; i++) {
+ s->companion_ports[firstport + i] = ports[i];
+ s->ports[firstport + i].speedmask |=
+ USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL;
+ /* Ensure devs attached before the initial reset go to the companion */
+ s->portsc[firstport + i] = PORTSC_POWNER;
+ }
+
+ s->companion_count++;
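+ /* HCSPARAMS byte 0x05 holds N_CC (bits 15:12) and N_PCC (bits 11:8) */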
+ s->mmio[0x05] = (s->companion_count << 4) | portcount;
+
+ return 0;
}
/* 4.1 host controller initialization */
{
EHCIState *s = opaque;
int i;
+ USBDevice *devs[NB_PORTS];
trace_usb_ehci_reset();
+ /*
+ * Do the detach before touching portsc, so that it correctly gets sent to
+ * us or to our companion based on PORTSC_POWNER before the reset.
+ */
+ for (i = 0; i < NB_PORTS; i++) {
+ devs[i] = s->ports[i].dev;
+ if (devs[i]) {
+ usb_attach(&s->ports[i], NULL);
+ }
+ }
+
memset(&s->mmio[OPREGBASE], 0x00, MMIO_SIZE - OPREGBASE);
s->usbcmd = NB_MAXINTRATE << USBCMD_ITC_SH;
s->attach_poll_counter = 0;
for(i = 0; i < NB_PORTS; i++) {
- s->portsc[i] = PORTSC_POWNER | PORTSC_PPOWER;
-
- if (s->ports[i].dev) {
- usb_attach(&s->ports[i], s->ports[i].dev);
+ if (s->companion_ports[i]) {
+ s->portsc[i] = PORTSC_POWNER | PORTSC_PPOWER;
+ } else {
+ s->portsc[i] = PORTSC_PPOWER;
+ }
+ if (devs[i]) {
+ usb_attach(&s->ports[i], devs[i]);
}
}
ehci_queues_rip_all(s);
exit(1);
}
+static void handle_port_owner_write(EHCIState *s, int port, uint32_t owner)
+{
+ USBDevice *dev = s->ports[port].dev;
+ uint32_t *portsc = &s->portsc[port];
+ uint32_t orig;
+
+ if (s->companion_ports[port] == NULL) {
+ return;
+ }
+
+ owner = owner & PORTSC_POWNER;
+ orig = *portsc & PORTSC_POWNER;
+
+ if (!(owner ^ orig)) {
+ return;
+ }
+
+ if (dev) {
+ usb_attach(&s->ports[port], NULL);
+ }
+
+ *portsc &= ~PORTSC_POWNER;
+ *portsc |= owner;
+
+ if (dev) {
+ usb_attach(&s->ports[port], dev);
+ }
+}
+
static void handle_port_status_write(EHCIState *s, int port, uint32_t val)
{
uint32_t *portsc = &s->portsc[port];
- int rwc;
USBDevice *dev = s->ports[port].dev;
- rwc = val & PORTSC_RWC_MASK;
+ /* Clear rwc bits */
+ *portsc &= ~(val & PORTSC_RWC_MASK);
+ /* The guest may clear, but not set the PED bit */
+ *portsc &= val | ~PORTSC_PED;
+ /* POWNER is masked out by RO_MASK as it is RO when we've no companion */
+ handle_port_owner_write(s, port, val);
+ /* And finally apply RO_MASK */
val &= PORTSC_RO_MASK;
- // handle_read_write_clear(&val, portsc, PORTSC_PEDC | PORTSC_CSC);
-
- *portsc &= ~rwc;
-
if ((val & PORTSC_PRESET) && !(*portsc & PORTSC_PRESET)) {
trace_usb_ehci_port_reset(port, 1);
}
if (!(val & PORTSC_PRESET) &&(*portsc & PORTSC_PRESET)) {
trace_usb_ehci_port_reset(port, 0);
- usb_attach(&s->ports[port], dev);
-
- // TODO how to handle reset of ports with no device
if (dev) {
+ usb_attach(&s->ports[port], dev);
usb_send_msg(dev, USB_MSG_RESET);
- }
-
- if (s->ports[port].dev) {
*portsc &= ~PORTSC_CSC;
}
- /* Table 2.16 Set the enable bit(and enable bit change) to indicate
+ /*
+ * Table 2.16: Set the enable bit (and enable bit change) to indicate
* to SW that this port has a high speed device attached
- *
- * TODO - when to disable?
*/
- val |= PORTSC_PED;
- val |= PORTSC_PEDC;
+ if (dev && (dev->speedmask & USB_SPEED_MASK_HIGH)) {
+ val |= PORTSC_PED;
+ }
}
*portsc &= ~PORTSC_RO_MASK;
val &= 0x1;
if (val) {
for(i = 0; i < NB_PORTS; i++)
- s->portsc[i] &= ~PORTSC_POWNER;
+ handle_port_owner_write(s, i, 0);
}
break;
return 0;
}
-static void ehci_async_complete_packet(USBDevice *dev, USBPacket *packet)
+static void ehci_async_complete_packet(USBPort *port, USBPacket *packet)
{
- EHCIQueue *q = container_of(packet, EHCIQueue, packet);
+ EHCIQueue *q;
+ EHCIState *s = port->opaque;
+ uint32_t portsc = s->portsc[port->index];
+
+ if (portsc & PORTSC_POWNER) {
+ USBPort *companion = s->companion_ports[port->index];
+ companion->ops->complete(companion, packet);
+ return;
+ }
+ q = container_of(packet, EHCIQueue, packet);
trace_usb_ehci_queue_action(q, "wakeup");
assert(q->async == EHCI_ASYNC_INFLIGHT);
q->async = EHCI_ASYNC_FINISHED;
port = &q->ehci->ports[i];
dev = port->dev;
- // TODO sometime we will also need to check if we are the port owner
-
if (!(q->ehci->portsc[i] &(PORTSC_CONNECT))) {
DPRINTF("Port %d, no exec, not connected(%08X)\n",
i, q->ehci->portsc[i]);
port = &ehci->ports[j];
dev = port->dev;
- // TODO sometime we will also need to check if we are the port owner
-
if (!(ehci->portsc[j] &(PORTSC_CONNECT))) {
continue;
}
cpu_register_physical_memory(addr, size, s->mem);
}
-static void ehci_device_destroy(USBBus *bus, USBDevice *dev)
-{
- EHCIState *s = container_of(bus, EHCIState, bus);
-
- ehci_queues_rip_device(s, dev);
-}
-
static int usb_ehci_initfn(PCIDevice *dev);
static USBPortOps ehci_port_ops = {
.attach = ehci_attach,
.detach = ehci_detach,
+ .child_detach = ehci_child_detach,
+ .wakeup = ehci_wakeup,
.complete = ehci_async_complete_packet,
};
static USBBusOps ehci_bus_ops = {
- .device_destroy = ehci_device_destroy,
+ .register_companion = ehci_register_companion,
};
-static PCIDeviceInfo ehci_info = {
- .qdev.name = "usb-ehci",
- .qdev.size = sizeof(EHCIState),
- .init = usb_ehci_initfn,
- .vendor_id = PCI_VENDOR_ID_INTEL,
- .device_id = PCI_DEVICE_ID_INTEL_82801D,
- .revision = 0x10,
- .class_id = PCI_CLASS_SERIAL_USB,
- .qdev.props = (Property[]) {
- DEFINE_PROP_UINT32("freq", EHCIState, freq, FRAME_TIMER_FREQ),
- DEFINE_PROP_UINT32("maxframes", EHCIState, maxframes, 128),
- DEFINE_PROP_END_OF_LIST(),
- },
+static Property ehci_properties[] = {
+ DEFINE_PROP_UINT32("freq", EHCIState, freq, FRAME_TIMER_FREQ),
+ DEFINE_PROP_UINT32("maxframes", EHCIState, maxframes, 128),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static PCIDeviceInfo ehci_info[] = {
+ {
+ .qdev.name = "usb-ehci",
+ .qdev.size = sizeof(EHCIState),
+ .init = usb_ehci_initfn,
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_82801D, /* ich4 */
+ .revision = 0x10,
+ .class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = ehci_properties,
+ },{
+ .qdev.name = "ich9-usb-ehci1",
+ .qdev.size = sizeof(EHCIState),
+ .init = usb_ehci_initfn,
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_82801I_EHCI1,
+ .revision = 0x03,
+ .class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = ehci_properties,
+ },{
+ /* end of list */
+ }
};
static int usb_ehci_initfn(PCIDevice *dev)
for(i = 0; i < NB_PORTS; i++) {
usb_register_port(&s->bus, &s->ports[i], s, i, &ehci_port_ops,
USB_SPEED_MASK_HIGH);
- usb_port_location(&s->ports[i], NULL, i+1);
s->ports[i].dev = 0;
}
static void ehci_register(void)
{
- pci_qdev_register(&ehci_info);
+ pci_qdev_register_many(ehci_info);
}
device_init(ehci_register);
.str = desc_strings,
};
-static const uint8_t qemu_hub_dev_descriptor[] = {
- 0x12, /* u8 bLength; */
- 0x01, /* u8 bDescriptorType; Device */
- 0x10, 0x01, /* u16 bcdUSB; v1.1 */
-
- 0x09, /* u8 bDeviceClass; HUB_CLASSCODE */
- 0x00, /* u8 bDeviceSubClass; */
- 0x00, /* u8 bDeviceProtocol; [ low/full speeds only ] */
- 0x08, /* u8 bMaxPacketSize0; 8 Bytes */
-
- 0x00, 0x00, /* u16 idVendor; */
- 0x00, 0x00, /* u16 idProduct; */
- 0x01, 0x01, /* u16 bcdDevice */
-
- 0x03, /* u8 iManufacturer; */
- 0x02, /* u8 iProduct; */
- 0x01, /* u8 iSerialNumber; */
- 0x01 /* u8 bNumConfigurations; */
-};
-
-/* XXX: patch interrupt size */
-static const uint8_t qemu_hub_config_descriptor[] = {
-
- /* one configuration */
- 0x09, /* u8 bLength; */
- 0x02, /* u8 bDescriptorType; Configuration */
- 0x19, 0x00, /* u16 wTotalLength; */
- 0x01, /* u8 bNumInterfaces; (1) */
- 0x01, /* u8 bConfigurationValue; */
- 0x00, /* u8 iConfiguration; */
- 0xe0, /* u8 bmAttributes;
- Bit 7: must be set,
- 6: Self-powered,
- 5: Remote wakeup,
- 4..0: resvd */
- 0x00, /* u8 MaxPower; */
-
- /* USB 1.1:
- * USB 2.0, single TT organization (mandatory):
- * one interface, protocol 0
- *
- * USB 2.0, multiple TT organization (optional):
- * two interfaces, protocols 1 (like single TT)
- * and 2 (multiple TT mode) ... config is
- * sometimes settable
- * NOT IMPLEMENTED
- */
-
- /* one interface */
- 0x09, /* u8 if_bLength; */
- 0x04, /* u8 if_bDescriptorType; Interface */
- 0x00, /* u8 if_bInterfaceNumber; */
- 0x00, /* u8 if_bAlternateSetting; */
- 0x01, /* u8 if_bNumEndpoints; */
- 0x09, /* u8 if_bInterfaceClass; HUB_CLASSCODE */
- 0x00, /* u8 if_bInterfaceSubClass; */
- 0x00, /* u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
- 0x00, /* u8 if_iInterface; */
-
- /* one endpoint (status change endpoint) */
- 0x07, /* u8 ep_bLength; */
- 0x05, /* u8 ep_bDescriptorType; Endpoint */
- 0x81, /* u8 ep_bEndpointAddress; IN Endpoint 1 */
- 0x03, /* u8 ep_bmAttributes; Interrupt */
- 0x02, 0x00, /* u16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
- 0xff /* u8 ep_bInterval; (255ms -- usb 2.0 spec) */
-};
-
static const uint8_t qemu_hub_hub_descriptor[] =
{
0x00, /* u8 bLength; patched in later */
USBHubState *s = port1->opaque;
USBHubPort *port = &s->ports[port1->index];
+ /* Let upstream know the device on this port is gone */
+ s->dev.port->ops->child_detach(s->dev.port, port1->dev);
+
port->wPortStatus &= ~PORT_STAT_CONNECTION;
port->wPortChange |= PORT_STAT_C_CONNECTION;
if (port->wPortStatus & PORT_STAT_ENABLE) {
}
}
-static void usb_hub_wakeup(USBDevice *dev)
+static void usb_hub_child_detach(USBPort *port1, USBDevice *child)
+{
+ USBHubState *s = port1->opaque;
+
+ /* Pass along upstream */
+ s->dev.port->ops->child_detach(s->dev.port, child);
+}
+
+static void usb_hub_wakeup(USBPort *port1)
{
- USBHubState *s = dev->port->opaque;
- USBHubPort *port = &s->ports[dev->port->index];
+ USBHubState *s = port1->opaque;
+ USBHubPort *port = &s->ports[port1->index];
if (port->wPortStatus & PORT_STAT_SUSPEND) {
port->wPortChange |= PORT_STAT_C_SUSPEND;
}
}
-static void usb_hub_complete(USBDevice *dev, USBPacket *packet)
+static void usb_hub_complete(USBPort *port, USBPacket *packet)
{
- USBHubState *s = dev->port->opaque;
+ USBHubState *s = port->opaque;
/*
* Just pass it along upstream for now.
static USBPortOps usb_hub_port_ops = {
.attach = usb_hub_attach,
.detach = usb_hub_detach,
+ .child_detach = usb_hub_child_detach,
.wakeup = usb_hub_wakeup,
.complete = usb_hub_complete,
};
static void musb_attach(USBPort *port);
static void musb_detach(USBPort *port);
-static void musb_schedule_cb(USBDevice *dev, USBPacket *p);
-static void musb_device_destroy(USBBus *bus, USBDevice *dev);
+static void musb_child_detach(USBPort *port, USBDevice *child);
+static void musb_schedule_cb(USBPort *port, USBPacket *p);
+static void musb_async_cancel_device(MUSBState *s, USBDevice *dev);
static USBPortOps musb_port_ops = {
.attach = musb_attach,
.detach = musb_detach,
+ .child_detach = musb_child_detach,
.complete = musb_schedule_cb,
};
static USBBusOps musb_bus_ops = {
- .device_destroy = musb_device_destroy,
};
typedef struct MUSBPacket MUSBPacket;
usb_bus_new(&s->bus, &musb_bus_ops, NULL /* FIXME */);
usb_register_port(&s->bus, &s->port, s, 0, &musb_port_ops,
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
- usb_port_location(&s->port, NULL, 1);
return s;
}
{
MUSBState *s = (MUSBState *) port->opaque;
+ musb_async_cancel_device(s, port->dev);
+
musb_intr_set(s, musb_irq_disconnect, 1);
musb_session_update(s, 1, s->session);
}
+static void musb_child_detach(USBPort *port, USBDevice *child)
+{
+ MUSBState *s = (MUSBState *) port->opaque;
+
+ musb_async_cancel_device(s, child);
+}
+
static void musb_cb_tick0(void *opaque)
{
MUSBEndPoint *ep = (MUSBEndPoint *) opaque;
#define musb_cb_tick (dir ? musb_cb_tick1 : musb_cb_tick0)
-static void musb_schedule_cb(USBDevice *dev, USBPacket *packey)
+static void musb_schedule_cb(USBPort *port, USBPacket *packey)
{
MUSBPacket *p = container_of(packey, MUSBPacket, p);
MUSBEndPoint *ep = p->ep;
}
ep->status[dir] = ret;
- usb_packet_complete(s->port.dev, &ep->packey[dir].p);
+ musb_schedule_cb(&s->port, &ep->packey[dir].p);
}
static void musb_tx_packet_complete(USBPacket *packey, void *opaque)
musb_rx_intr_set(s, epnum, 1);
}
-static void musb_device_destroy(USBBus *bus, USBDevice *dev)
+static void musb_async_cancel_device(MUSBState *s, USBDevice *dev)
{
- MUSBState *s = container_of(bus, MUSBState, bus);
int ep, dir;
for (ep = 0; ep < 16; ep++) {
};
static void ohci_bus_stop(OHCIState *ohci);
+static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev);
/* Bitfields for the first word of an Endpoint Desciptor. */
#define OHCI_ED_FA_SHIFT 0
{
OHCIState *s = port1->opaque;
OHCIPort *port = &s->rhport[port1->index];
+ uint32_t old_state = port->ctrl;
/* set connect status */
port->ctrl |= OHCI_PORT_CCS | OHCI_PORT_CSC;
}
DPRINTF("usb-ohci: Attached port %d\n", port1->index);
+
+ if (old_state != port->ctrl) {
+ ohci_set_interrupt(s, OHCI_INTR_RHSC);
+ }
}
static void ohci_detach(USBPort *port1)
OHCIPort *port = &s->rhport[port1->index];
uint32_t old_state = port->ctrl;
+ ohci_async_cancel_device(s, port1->dev);
+
/* set connect status */
if (port->ctrl & OHCI_PORT_CCS) {
port->ctrl &= ~OHCI_PORT_CCS;
}
DPRINTF("usb-ohci: Detached port %d\n", port1->index);
- if (old_state != port->ctrl)
+ if (old_state != port->ctrl) {
ohci_set_interrupt(s, OHCI_INTR_RHSC);
+ }
}
-static void ohci_wakeup(USBDevice *dev)
+static void ohci_wakeup(USBPort *port1)
{
- USBBus *bus = usb_bus_from_device(dev);
- OHCIState *s = container_of(bus, OHCIState, bus);
- int portnum = dev->port->index;
- OHCIPort *port = &s->rhport[portnum];
+ OHCIState *s = port1->opaque;
+ OHCIPort *port = &s->rhport[port1->index];
uint32_t intr = 0;
if (port->ctrl & OHCI_PORT_PSS) {
- DPRINTF("usb-ohci: port %d: wakeup\n", portnum);
+ DPRINTF("usb-ohci: port %d: wakeup\n", port1->index);
port->ctrl |= OHCI_PORT_PSSC;
port->ctrl &= ~OHCI_PORT_PSS;
intr = OHCI_INTR_RHSC;
ohci_set_interrupt(s, intr);
}
+static void ohci_child_detach(USBPort *port1, USBDevice *child)
+{
+ OHCIState *s = port1->opaque;
+
+ ohci_async_cancel_device(s, child);
+}
+
/* Reset the controller */
static void ohci_reset(void *opaque)
{
static void ohci_process_lists(OHCIState *ohci, int completion);
-static void ohci_async_complete_packet(USBDevice *dev, USBPacket *packet)
+static void ohci_async_complete_packet(USBPort *port, USBPacket *packet)
{
OHCIState *ohci = container_of(packet, OHCIState, usb_packet);
#ifdef DEBUG_PACKET
}
}
-static void ohci_device_destroy(USBBus *bus, USBDevice *dev)
+static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev)
{
- OHCIState *ohci = container_of(bus, OHCIState, bus);
-
if (ohci->async_td && ohci->usb_packet.owner == dev) {
usb_cancel_packet(&ohci->usb_packet);
ohci->async_td = 0;
static USBPortOps ohci_port_ops = {
.attach = ohci_attach,
.detach = ohci_detach,
+ .child_detach = ohci_child_detach,
.wakeup = ohci_wakeup,
.complete = ohci_async_complete_packet,
};
static USBBusOps ohci_bus_ops = {
- .device_destroy = ohci_device_destroy,
};
-static void usb_ohci_init(OHCIState *ohci, DeviceState *dev,
- int num_ports, uint32_t localmem_base)
+static int usb_ohci_init(OHCIState *ohci, DeviceState *dev,
+ int num_ports, uint32_t localmem_base,
+ char *masterbus, uint32_t firstport)
{
int i;
usb_frame_time, usb_bit_time);
}
+ ohci->num_ports = num_ports;
+ if (masterbus) {
+ USBPort *ports[OHCI_MAX_PORTS];
+ for (i = 0; i < num_ports; i++) {
+ ports[i] = &ohci->rhport[i].port;
+ }
+ if (usb_register_companion(masterbus, ports, num_ports,
+ firstport, ohci, &ohci_port_ops,
+ USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL) != 0) {
+ return -1;
+ }
+ } else {
+ usb_bus_new(&ohci->bus, &ohci_bus_ops, dev);
+ for (i = 0; i < num_ports; i++) {
+ usb_register_port(&ohci->bus, &ohci->rhport[i].port,
+ ohci, i, &ohci_port_ops,
+ USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
+ }
+ }
+
ohci->mem = cpu_register_io_memory(ohci_readfn, ohci_writefn, ohci,
DEVICE_LITTLE_ENDIAN);
ohci->localmem_base = localmem_base;
ohci->name = dev->info->name;
- usb_bus_new(&ohci->bus, &ohci_bus_ops, dev);
- ohci->num_ports = num_ports;
- for (i = 0; i < num_ports; i++) {
- usb_register_port(&ohci->bus, &ohci->rhport[i].port, ohci, i, &ohci_port_ops,
- USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
- usb_port_location(&ohci->rhport[i].port, NULL, i+1);
- }
-
ohci->async_td = 0;
qemu_register_reset(ohci_reset, ohci);
+
+ return 0;
}
typedef struct {
PCIDevice pci_dev;
OHCIState state;
+ char *masterbus;
+ uint32_t num_ports;
+ uint32_t firstport;
} OHCIPCIState;
static int usb_ohci_initfn_pci(struct PCIDevice *dev)
{
OHCIPCIState *ohci = DO_UPCAST(OHCIPCIState, pci_dev, dev);
- int num_ports = 3;
ohci->pci_dev.config[PCI_CLASS_PROG] = 0x10; /* OHCI */
/* TODO: RST# value should be 0. */
ohci->pci_dev.config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin 1 */
- usb_ohci_init(&ohci->state, &dev->qdev, num_ports, 0);
+ if (usb_ohci_init(&ohci->state, &dev->qdev, ohci->num_ports, 0,
+ ohci->masterbus, ohci->firstport) != 0) {
+ return -1;
+ }
ohci->state.irq = ohci->pci_dev.irq[0];
/* TODO: avoid cast below by using dev */
{
OHCISysBusState *s = FROM_SYSBUS(OHCISysBusState, dev);
- usb_ohci_init(&s->ohci, &dev->qdev, s->num_ports, s->dma_offset);
+ /* Cannot fail as we pass NULL for masterbus */
+ usb_ohci_init(&s->ohci, &dev->qdev, s->num_ports, s->dma_offset, NULL, 0);
sysbus_init_irq(dev, &s->ohci.irq);
sysbus_init_mmio(dev, 0x1000, s->ohci.mem);
.vendor_id = PCI_VENDOR_ID_APPLE,
.device_id = PCI_DEVICE_ID_APPLE_IPID_USB,
.class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = (Property[]) {
+ DEFINE_PROP_STRING("masterbus", OHCIPCIState, masterbus),
+ DEFINE_PROP_UINT32("num-ports", OHCIPCIState, num_ports, 3),
+ DEFINE_PROP_UINT32("firstport", OHCIPCIState, firstport, 0),
+ DEFINE_PROP_END_OF_LIST(),
+ },
};
static SysBusDeviceInfo ohci_sysbus_info = {
struct UHCIState {
PCIDevice dev;
- USBBus bus;
+ USBBus bus; /* Note: unused when we're a companion controller */
uint16_t cmd; /* cmd register */
uint16_t status;
uint16_t intr; /* interrupt enable register */
/* Active packets */
QTAILQ_HEAD(,UHCIAsync) async_pending;
uint8_t num_ports_vmstate;
+
+ /* Properties */
+ char *masterbus;
+ uint32_t firstport;
};
typedef struct UHCI_TD {
UHCIState *s = port1->opaque;
UHCIPort *port = &s->ports[port1->index];
+ uhci_async_cancel_device(s, port1->dev);
+
/* set connect status */
if (port->ctrl & UHCI_PORT_CCS) {
port->ctrl &= ~UHCI_PORT_CCS;
uhci_resume(s);
}
-static void uhci_wakeup(USBDevice *dev)
+static void uhci_child_detach(USBPort *port1, USBDevice *child)
+{
+ UHCIState *s = port1->opaque;
+
+ uhci_async_cancel_device(s, child);
+}
+
+static void uhci_wakeup(USBPort *port1)
{
- USBBus *bus = usb_bus_from_device(dev);
- UHCIState *s = container_of(bus, UHCIState, bus);
- UHCIPort *port = s->ports + dev->port->index;
+ UHCIState *s = port1->opaque;
+ UHCIPort *port = &s->ports[port1->index];
if (port->ctrl & UHCI_PORT_SUSPEND && !(port->ctrl & UHCI_PORT_RD)) {
port->ctrl |= UHCI_PORT_RD;
return ret;
}
-static void uhci_async_complete(USBDevice *dev, USBPacket *packet);
+static void uhci_async_complete(USBPort *port, USBPacket *packet);
static void uhci_process_frame(UHCIState *s);
/* return -1 if fatal error (frame must be stopped)
return len;
}
-static void uhci_async_complete(USBDevice *dev, USBPacket *packet)
+static void uhci_async_complete(USBPort *port, USBPacket *packet)
{
UHCIAsync *async = container_of(packet, UHCIAsync, packet);
UHCIState *s = async->uhci;
register_ioport_read(addr, 32, 1, uhci_ioport_readb, s);
}
-static void uhci_device_destroy(USBBus *bus, USBDevice *dev)
-{
- UHCIState *s = container_of(bus, UHCIState, bus);
-
- uhci_async_cancel_device(s, dev);
-}
-
static USBPortOps uhci_port_ops = {
.attach = uhci_attach,
.detach = uhci_detach,
+ .child_detach = uhci_child_detach,
.wakeup = uhci_wakeup,
.complete = uhci_async_complete,
};
static USBBusOps uhci_bus_ops = {
- .device_destroy = uhci_device_destroy,
};
static int usb_uhci_common_initfn(PCIDevice *dev)
pci_conf[PCI_INTERRUPT_PIN] = 4; // interrupt pin 3
pci_conf[USB_SBRN] = USB_RELEASE_1; // release number
- usb_bus_new(&s->bus, &uhci_bus_ops, &s->dev.qdev);
- for(i = 0; i < NB_PORTS; i++) {
- usb_register_port(&s->bus, &s->ports[i].port, s, i, &uhci_port_ops,
- USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
- usb_port_location(&s->ports[i].port, NULL, i+1);
+ if (s->masterbus) {
+ USBPort *ports[NB_PORTS];
+ for (i = 0; i < NB_PORTS; i++) {
+ ports[i] = &s->ports[i].port;
+ }
+ if (usb_register_companion(s->masterbus, ports, NB_PORTS,
+ s->firstport, s, &uhci_port_ops,
+ USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL) != 0) {
+ return -1;
+ }
+ } else {
+ usb_bus_new(&s->bus, &uhci_bus_ops, &s->dev.qdev);
+ for (i = 0; i < NB_PORTS; i++) {
+ usb_register_port(&s->bus, &s->ports[i].port, s, i, &uhci_port_ops,
+ USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
+ }
}
s->frame_timer = qemu_new_timer_ns(vm_clock, uhci_frame_timer, s);
s->num_ports_vmstate = NB_PORTS;
return usb_uhci_common_initfn(dev);
}
+static Property uhci_properties[] = {
+ DEFINE_PROP_STRING("masterbus", UHCIState, masterbus),
+ DEFINE_PROP_UINT32("firstport", UHCIState, firstport, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static PCIDeviceInfo uhci_info[] = {
{
.qdev.name = "piix3-usb-uhci",
.device_id = PCI_DEVICE_ID_INTEL_82371SB_2,
.revision = 0x01,
.class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = uhci_properties,
},{
.qdev.name = "piix4-usb-uhci",
.qdev.size = sizeof(UHCIState),
.device_id = PCI_DEVICE_ID_INTEL_82371AB_2,
.revision = 0x01,
.class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = uhci_properties,
},{
.qdev.name = "vt82c686b-usb-uhci",
.qdev.size = sizeof(UHCIState),
.device_id = PCI_DEVICE_ID_VIA_UHCI,
.revision = 0x01,
.class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = uhci_properties,
+ },{
+ .qdev.name = "ich9-usb-uhci1",
+ .qdev.size = sizeof(UHCIState),
+ .qdev.vmsd = &vmstate_uhci,
+ .init = usb_uhci_common_initfn,
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI1,
+ .revision = 0x03,
+ .class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = uhci_properties,
+ },{
+ .qdev.name = "ich9-usb-uhci2",
+ .qdev.size = sizeof(UHCIState),
+ .qdev.vmsd = &vmstate_uhci,
+ .init = usb_uhci_common_initfn,
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI2,
+ .revision = 0x03,
+ .class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = uhci_properties,
+ },{
+ .qdev.name = "ich9-usb-uhci3",
+ .qdev.size = sizeof(UHCIState),
+ .qdev.vmsd = &vmstate_uhci,
+ .init = usb_uhci_common_initfn,
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI3,
+ .revision = 0x03,
+ .class_id = PCI_CLASS_SERIAL_USB,
+ .qdev.props = uhci_properties,
},{
/* end of list */
}
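
With the masterbus and firstport properties above, the new ich9-usb-uhci1/2/3 functions can be registered as companions of an EHCI controller instead of creating their own bus. A hypothetical invocation (the EHCI side is added separately; the bus name "ehci.0" assumes an EHCI device with id "ehci") would look roughly like:

    -device ich9-usb-uhci1,masterbus=ehci.0,firstport=0
    -device ich9-usb-uhci2,masterbus=ehci.0,firstport=2
    -device ich9-usb-uhci3,masterbus=ehci.0,firstport=4

When masterbus is left unset, usb_uhci_common_initfn() keeps the old behaviour and creates a standalone USB bus for the controller.
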
} else {
/* detach */
dev = port->dev;
+ assert(dev);
port->ops->detach(port);
- if (dev) {
- usb_send_msg(dev, USB_MSG_DETACH);
- dev->port = NULL;
- port->dev = NULL;
- }
+ usb_send_msg(dev, USB_MSG_DETACH);
+ dev->port = NULL;
+ port->dev = NULL;
}
}
void usb_wakeup(USBDevice *dev)
{
if (dev->remote_wakeup && dev->port && dev->port->ops->wakeup) {
- dev->port->ops->wakeup(dev);
+ dev->port->ops->wakeup(dev->port);
}
}
{
/* Note: p->owner != dev is possible in case dev is a hub */
assert(p->owner != NULL);
- dev->port->ops->complete(dev, p);
+ dev->port->ops->complete(dev->port, p);
p->owner = NULL;
}
typedef struct USBPortOps {
void (*attach)(USBPort *port);
void (*detach)(USBPort *port);
- void (*wakeup)(USBDevice *dev);
- void (*complete)(USBDevice *dev, USBPacket *p);
+ /*
+ * This gets called when a device downstream from the device attached to
+     * the port (i.e. attached through a hub) gets detached.
+ */
+ void (*child_detach)(USBPort *port, USBDevice *child);
+ void (*wakeup)(USBPort *port);
+ /*
+     * Note that port->dev will be different from the device from which
+     * the packet originated when a hub is involved; if you want the
+     * originating device, use p->owner.
+ */
+ void (*complete)(USBPort *port, USBPacket *p);
} USBPortOps;
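
As a minimal sketch of the new callback contract (MyHCState and my_hc_finish_transfer are hypothetical names, not part of the patch), a host controller's .complete hook should take the originating device from the packet rather than from the port, since port->dev is the intervening hub when one is present:

    static void my_hc_complete(USBPort *port, USBPacket *p)
    {
        MyHCState *s = port->opaque;  /* state passed to usb_register_port() */
        USBDevice *dev = p->owner;    /* device that issued the transfer */

        /* port->dev may be a hub here; dev is the transfer's real owner */
        my_hc_finish_transfer(s, dev, p);
    }
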
/* USB port on which a device can be connected */
};
struct USBBusOps {
- void (*device_destroy)(USBBus *bus, USBDevice *dev);
+ int (*register_companion)(USBBus *bus, USBPort *ports[],
+ uint32_t portcount, uint32_t firstport);
};
void usb_bus_new(USBBus *bus, USBBusOps *ops, DeviceState *host);
USBDevice *usbdevice_create(const char *cmdline);
void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index,
USBPortOps *ops, int speedmask);
+int usb_register_companion(const char *masterbus, USBPort *ports[],
+ uint32_t portcount, uint32_t firstport,
+ void *opaque, USBPortOps *ops, int speedmask);
void usb_port_location(USBPort *downstream, USBPort *upstream, int portnr);
void usb_unregister_port(USBBus *bus, USBPort *port);
int usb_device_attach(USBDevice *dev);
hdev->started = false;
qemu_free(hdev->log);
+ hdev->log = NULL;
hdev->log_size = 0;
}
#include "qemu-char.h"
#include "qemu-error.h"
+#include "trace.h"
#include "virtio-serial.h"
typedef struct VirtConsole {
static ssize_t flush_buf(VirtIOSerialPort *port, const uint8_t *buf, size_t len)
{
VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port);
-
- return qemu_chr_write(vcon->chr, buf, len);
+ ssize_t ret;
+
+ ret = qemu_chr_write(vcon->chr, buf, len);
+ trace_virtio_console_flush_buf(port->id, len, ret);
+
+ if (ret < 0) {
+ /*
+ * Ideally we'd get a better error code than just -1, but
+ * that's what the chardev interface gives us right now. If
+ * we had a finer-grained message, like -EPIPE, we could close
+ * this connection. Absent such error messages, the most we
+ * can do is to return 0 here.
+ *
+         * This prevents stray -1 values from reaching
+         * virtio-serial-bus.c and causing abort()s in
+         * do_flush_queued_data().
+ */
+ ret = 0;
+ }
+ return ret;
}
/* Callback function that's called when the guest opens the port */
{
VirtConsole *vcon = opaque;
+ trace_virtio_console_chr_read(vcon->port.id, size);
virtio_serial_write(&vcon->port, buf, size);
}
{
VirtConsole *vcon = opaque;
+ trace_virtio_console_chr_event(vcon->port.id, event);
switch (event) {
case CHR_EVENT_OPENED:
virtio_serial_open(&vcon->port);
#include "monitor.h"
#include "qemu-queue.h"
#include "sysbus.h"
+#include "trace.h"
#include "virtio-serial.h"
/* The virtio-serial bus on top of which the ports will ride as devices */
stw_p(&cpkt.event, event);
stw_p(&cpkt.value, value);
+ trace_virtio_serial_send_control_event(port->id, event, value);
return send_control_msg(port, &cpkt, sizeof(cpkt));
}
return;
}
+ trace_virtio_serial_throttle_port(port->id, throttle);
port->throttled = throttle;
if (throttle) {
return;
cpkt.event = lduw_p(&gcpkt->event);
cpkt.value = lduw_p(&gcpkt->value);
+ trace_virtio_serial_handle_control_message(cpkt.event, cpkt.value);
+
if (cpkt.event == VIRTIO_CONSOLE_DEVICE_READY) {
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding device %s",
port = find_port_by_id(vser, ldl_p(&gcpkt->id));
if (!port) {
- error_report("virtio-serial-bus: Unexpected port id %u for device %s\n",
+ error_report("virtio-serial-bus: Unexpected port id %u for device %s",
ldl_p(&gcpkt->id), vser->bus.qbus.name);
return;
}
+ trace_virtio_serial_handle_control_message_port(port->id);
+
info = DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info);
switch(cpkt.event) {
struct iovec *sg;
if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
+ if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
+ error_report("Too many write descriptors in indirect table");
+ exit(1);
+ }
elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
sg = &elem->in_sg[elem->in_num++];
} else {
+ if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
+ error_report("Too many read descriptors in indirect table");
+ exit(1);
+ }
elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
sg = &elem->out_sg[elem->out_num++];
}
#endif
}
-static inline int xen_mapcache_enabled(void)
-{
-#ifdef CONFIG_XEN_MAPCACHE
- return xen_enabled();
-#else
- return 0;
-#endif
-}
-
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
void xen_piix3_set_irq(void *opaque, int irq_num, int level);
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
int xen_init(void);
int xen_hvm_init(void);
void xen_vcpu_init(void);
+void xenstore_store_pv_console_info(int i, struct CharDriverState *chr);
#if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size);
return xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
}
+static inline struct xs_handle *xs_open(unsigned long flags)
+{
+ return xs_daemon_open();
+}
+
+static inline void xs_close(struct xs_handle *xsh)
+{
+ if (xsh != NULL) {
+ xs_daemon_close(xsh);
+ }
+}
+
/* Xen 4.1 */
#else
static int con_init(struct XenDevice *xendev)
{
struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
- char *type, *dom;
+ char *type, *dom, label[32];
+ int ret = 0;
+ const char *output;
/* setup */
dom = xs_get_domain_path(xenstore, con->xendev.dom);
type = xenstore_read_str(con->console, "type");
if (!type || strcmp(type, "ioemu") != 0) {
xen_be_printf(xendev, 1, "not for me (type=%s)\n", type);
- return -1;
+ ret = -1;
+ goto out;
}
- if (!serial_hds[con->xendev.dev])
- xen_be_printf(xendev, 1, "WARNING: serial line %d not configured\n",
- con->xendev.dev);
- else
+ output = xenstore_read_str(con->console, "output");
+
+ /* no Xen override, use qemu output device */
+ if (output == NULL) {
con->chr = serial_hds[con->xendev.dev];
+ } else {
+ snprintf(label, sizeof(label), "xencons%d", con->xendev.dev);
+ con->chr = qemu_chr_open(label, output, NULL);
+ }
- return 0;
+ xenstore_store_pv_console_info(con->xendev.dev, con->chr);
+
+out:
+ qemu_free(type);
+ return ret;
}
static int con_connect(struct XenDevice *xendev)
{
struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
int index, qflags, have_barriers, info = 0;
- char *h;
/* read xenstore entries */
if (blkdev->params == NULL) {
+ char *h = NULL;
blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
- h = strchr(blkdev->params, ':');
+ if (blkdev->params != NULL) {
+ h = strchr(blkdev->params, ':');
+ }
if (h != NULL) {
blkdev->fileproto = blkdev->params;
blkdev->filename = h+1;
blkdev->filename = blkdev->params;
}
}
+ if (!strcmp("aio", blkdev->fileproto)) {
+ blkdev->fileproto = "raw";
+ }
if (blkdev->mode == NULL) {
blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
}
blkdev->mode == NULL ||
blkdev->type == NULL ||
blkdev->dev == NULL) {
- return -1;
+ goto out_error;
}
/* read-only ? */
/* setup via xenbus -> create new block driver instance */
xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
blkdev->bs = bdrv_new(blkdev->dev);
- if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
- bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
- bdrv_delete(blkdev->bs);
- return -1;
+ if (blkdev->bs) {
+ if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
+ bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
+ bdrv_delete(blkdev->bs);
+ blkdev->bs = NULL;
+ }
+ }
+ if (!blkdev->bs) {
+ goto out_error;
}
} else {
/* setup via qemu cmdline -> already setup for us */
xenstore_write_be_int(&blkdev->xendev, "sectors",
blkdev->file_size / blkdev->file_blk);
return 0;
+
+out_error:
+ qemu_free(blkdev->params);
+ blkdev->params = NULL;
+ qemu_free(blkdev->mode);
+ blkdev->mode = NULL;
+ qemu_free(blkdev->type);
+ blkdev->type = NULL;
+ qemu_free(blkdev->dev);
+ blkdev->dev = NULL;
+ qemu_free(blkdev->devtype);
+ blkdev->devtype = NULL;
+ return -1;
}
static int blk_connect(struct XenDevice *xendev)
pci_conf = d->pci_dev.config;
- pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_XENSOURCE);
- pci_config_set_device_id(pci_conf, 0x0001);
- pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, PCI_VENDOR_ID_XENSOURCE);
- pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0001);
-
pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_config_set_revision(pci_conf, 1);
pci_config_set_prog_interface(pci_conf, 0);
- pci_config_set_class(pci_conf, PCI_CLASS_OTHERS << 8 | 0x80);
-
pci_conf[PCI_INTERRUPT_PIN] = 1;
pci_register_bar(&d->pci_dev, 0, 0x100,
.qdev.size = sizeof(PCIXenPlatformState),
.qdev.vmsd = &vmstate_xen_platform,
.qdev.reset = platform_reset,
+
+ .vendor_id = PCI_VENDOR_ID_XEN,
+ .device_id = PCI_DEVICE_ID_XEN_PLATFORM,
+ .class_id = PCI_CLASS_OTHERS << 8 | 0x80,
+ .subsystem_vendor_id = PCI_VENDOR_ID_XEN,
+ .subsystem_id = PCI_DEVICE_ID_XEN_PLATFORM,
+ .revision = 1,
};
static void xen_platform_register(void)
static int input_init(struct XenDevice *xendev)
{
- struct XenInput *in = container_of(xendev, struct XenInput, c.xendev);
-
- if (!in->c.ds) {
- xen_be_printf(xendev, 1, "ds not set (yet)\n");
- return -1;
- }
-
xenstore_write_be_int(xendev, "feature-abs-pointer", 1);
return 0;
}
&in->abs_pointer_wanted) == -1)
in->abs_pointer_wanted = 0;
+ if (!in->c.ds) {
+ char *vfb = xenstore_read_str(NULL, "device/vfb");
+ if (vfb == NULL) {
+ /* there is no vfb, run vkbd on its own */
+ in->c.ds = get_displaystate();
+ } else {
+ qemu_free(vfb);
+ xen_be_printf(xendev, 1, "ds not set (yet)\n");
+ return -1;
+ }
+ }
+
rc = common_bind(&in->c);
if (rc != 0)
return rc;
#define TARGET_NR_signalfd 476
#define TARGET_NR_timerfd 477
#define TARGET_NR_eventfd 478
-
+#define TARGET_NR_recvmmsg 479
+#define TARGET_NR_fallocate 480
+#define TARGET_NR_timerfd_create 481
+#define TARGET_NR_timerfd_settime 482
+#define TARGET_NR_timerfd_gettime 483
+#define TARGET_NR_signalfd4 484
+#define TARGET_NR_eventfd2 485
+#define TARGET_NR_epoll_create1 486
+#define TARGET_NR_dup3 487
+#define TARGET_NR_pipe2 488
+#define TARGET_NR_inotify_init1 489
+#define TARGET_NR_preadv 490
+#define TARGET_NR_pwritev 491
+#define TARGET_NR_rt_tgsigqueueinfo 492
+#define TARGET_NR_perf_event_open 493
+#define TARGET_NR_fanotify_init 494
+#define TARGET_NR_fanotify_mark 495
+#define TARGET_NR_prlimit64 496
+#define TARGET_NR_name_to_handle_at 497
+#define TARGET_NR_open_by_handle_at 498
+#define TARGET_NR_clock_adjtime 499
+#define TARGET_NR_syncfs 500
#define TARGET_NR_dup3 (358)
#define TARGET_NR_pipe2 (359)
#define TARGET_NR_inotify_init1 (360)
+#define TARGET_NR_preadv (361)
+#define TARGET_NR_pwritev (362)
+#define TARGET_NR_rt_tgsigqueueinfo (363)
+#define TARGET_NR_perf_event_open (364)
+#define TARGET_NR_recvmmsg (365)
+#define TARGET_NR_accept4 (366)
+#define TARGET_NR_fanotify_init (367)
+#define TARGET_NR_fanotify_mark (368)
+#define TARGET_NR_prlimit64 (369)
+#define TARGET_NR_name_to_handle_at (370)
+#define TARGET_NR_open_by_handle_at (371)
+#define TARGET_NR_clock_adjtime (372)
+#define TARGET_NR_syncfs (373)
#define TARGET_NR_dup3 330
#define TARGET_NR_pipe2 331
#define TARGET_NR_inotify_init1 332
+#define TARGET_NR_preadv 333
+#define TARGET_NR_pwritev 334
#define TARGET_NR_dup3 330
#define TARGET_NR_pipe2 331
#define TARGET_NR_inotify_init1 332
+#define TARGET_NR_preadv 333
+#define TARGET_NR_pwritev 334
+#define TARGET_NR_rt_tgsigqueueinfo 335
+#define TARGET_NR_perf_event_open 336
+#define TARGET_NR_recvmmsg 337
+#define TARGET_NR_fanotify_init 338
+#define TARGET_NR_fanotify_mark 339
+#define TARGET_NR_prlimit64 340
+#define TARGET_NR_name_to_handle_at 341
+#define TARGET_NR_open_by_handle_at 342
+#define TARGET_NR_clock_adjtime 343
+#define TARGET_NR_syncfs 344
IOCTL(KDSKBMODE, 0, TYPE_INT)
IOCTL(KDGKBENT, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_kbentry)))
IOCTL(KDGKBSENT, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_kbsentry)))
+ IOCTL(KDGKBLED, 0, TYPE_INT)
+ IOCTL(KDSKBLED, 0, TYPE_INT)
+ IOCTL(KDGETLED, 0, TYPE_INT)
+ IOCTL(KDSETLED, 0, TYPE_INT)
IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
IOCTL(BLKROGET, IOC_R, MK_PTR(TYPE_INT))
IOCTL(FBIOGET_FSCREENINFO, IOC_R, MK_PTR(MK_STRUCT(STRUCT_fb_fix_screeninfo)))
IOCTL(FBIOGET_VSCREENINFO, IOC_R, MK_PTR(MK_STRUCT(STRUCT_fb_var_screeninfo)))
IOCTL(FBIOPUT_VSCREENINFO, IOC_W, MK_PTR(MK_STRUCT(STRUCT_fb_var_screeninfo)))
+ IOCTL(FBIOGETCMAP, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_fb_cmap)))
+ IOCTL(FBIOPUTCMAP, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_fb_cmap)))
+ IOCTL(FBIOPAN_DISPLAY, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_fb_var_screeninfo)))
+ IOCTL(FBIOGET_CON2FBMAP, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_fb_con2fbmap)))
+ IOCTL(FBIOPUT_CON2FBMAP, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_fb_con2fbmap)))
IOCTL(VT_OPENQRY, IOC_R, MK_PTR(TYPE_INT))
IOCTL(VT_GETSTATE, IOC_R, MK_PTR(MK_STRUCT(STRUCT_vt_stat)))
IOCTL(VT_WAITACTIVE, 0, TYPE_INT)
IOCTL(VT_LOCKSWITCH, 0, TYPE_INT)
IOCTL(VT_UNLOCKSWITCH, 0, TYPE_INT)
+ IOCTL(VT_GETMODE, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_vt_mode)))
+ IOCTL(VT_SETMODE, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_vt_mode)))
+ IOCTL(VT_RELDISP, 0, TYPE_INT)
+ IOCTL(VT_DISALLOCATE, 0, TYPE_INT)
#define TARGET_NR_dup3 326
#define TARGET_NR_pipe2 327
#define TARGET_NR_inotify_init1 328
+#define TARGET_NR_preadv 329
+#define TARGET_NR_pwritev 330
+#define TARGET_NR_rt_tgsigqueueinfo 331
+#define TARGET_NR_perf_event_open 332
+#define TARGET_NR_get_thread_area 333
+#define TARGET_NR_set_thread_area 334
+#define TARGET_NR_atomic_cmpxchg_32 335
+#define TARGET_NR_atomic_barrier 336
+#define TARGET_NR_fanotify_init 337
+#define TARGET_NR_fanotify_mark 338
+#define TARGET_NR_prlimit64 339
+#define TARGET_NR_name_to_handle_at 340
+#define TARGET_NR_open_by_handle_at 341
+#define TARGET_NR_clock_adjtime 342
+#define TARGET_NR_syncfs 343
MIPS_SYS(sys_getcwd , 2)
MIPS_SYS(sys_capget , 2)
MIPS_SYS(sys_capset , 2) /* 4205 */
- MIPS_SYS(sys_sigaltstack , 0)
+ MIPS_SYS(sys_sigaltstack , 2)
MIPS_SYS(sys_sendfile , 4)
MIPS_SYS(sys_ni_syscall , 0)
MIPS_SYS(sys_ni_syscall , 0)
MIPS_SYS(sys_epoll_pwait, 6)
MIPS_SYS(sys_ioprio_set, 3)
MIPS_SYS(sys_ioprio_get, 2)
+ MIPS_SYS(sys_utimensat, 4)
+ MIPS_SYS(sys_signalfd, 3)
+ MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
+ MIPS_SYS(sys_eventfd, 1)
+ MIPS_SYS(sys_fallocate, 6) /* 4320 */
+ MIPS_SYS(sys_timerfd_create, 2)
+ MIPS_SYS(sys_timerfd_gettime, 2)
+ MIPS_SYS(sys_timerfd_settime, 4)
+ MIPS_SYS(sys_signalfd4, 4)
+ MIPS_SYS(sys_eventfd2, 2) /* 4325 */
+ MIPS_SYS(sys_epoll_create1, 1)
+ MIPS_SYS(sys_dup3, 3)
+ MIPS_SYS(sys_pipe2, 2)
+ MIPS_SYS(sys_inotify_init1, 1)
+ MIPS_SYS(sys_preadv, 6) /* 4330 */
+ MIPS_SYS(sys_pwritev, 6)
+ MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
+ MIPS_SYS(sys_perf_event_open, 5)
+ MIPS_SYS(sys_accept4, 4)
+ MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
+ MIPS_SYS(sys_fanotify_init, 2)
+ MIPS_SYS(sys_fanotify_mark, 6)
+ MIPS_SYS(sys_prlimit64, 4)
+ MIPS_SYS(sys_name_to_handle_at, 5)
+ MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
+ MIPS_SYS(sys_clock_adjtime, 2)
+ MIPS_SYS(sys_syncfs, 1)
};
#undef MIPS_SYS
syscall_num = env->active_tc.gpr[2] - 4000;
env->active_tc.PC += 4;
if (syscall_num >= sizeof(mips_syscall_args)) {
- ret = -ENOSYS;
+ ret = -TARGET_ENOSYS;
} else {
int nb_args;
abi_ulong sp_reg;
break;
case EXCP_TLBL:
case EXCP_TLBS:
+ case EXCP_AdEL:
+ case EXCP_AdES:
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
#define TARGET_NR_sendmsg 360 /* new */
#define TARGET_NR_recvmsg 361 /* new */
#define TARGET_NR_accept04 362 /* new */
-
-#define TARGET_NR_syscalls 363
+#define TARGET_NR_preadv 363 /* new */
+#define TARGET_NR_pwritev 364 /* new */
+#define TARGET_NR_rt_tgsigqueueinfo 365 /* new */
+#define TARGET_NR_perf_event_open 366 /* new */
+#define TARGET_NR_recvmmsg 367 /* new */
+#define TARGET_NR_fanotify_init 368
+#define TARGET_NR_fanotify_mark 369
+#define TARGET_NR_prlimit64 370
+#define TARGET_NR_name_to_handle_at 371
+#define TARGET_NR_open_by_handle_at 372
+#define TARGET_NR_clock_adjtime 373
+#define TARGET_NR_syncfs 374
#define TARGET_NR_dup3 (TARGET_NR_Linux + 327)
#define TARGET_NR_pipe2 (TARGET_NR_Linux + 328)
#define TARGET_NR_inotify_init1 (TARGET_NR_Linux + 329)
+#define TARGET_NR_preadv (TARGET_NR_Linux + 330)
+#define TARGET_NR_pwritev (TARGET_NR_Linux + 331)
+#define TARGET_NR_rt_tgsigqueueinfo (TARGET_NR_Linux + 332)
+#define TARGET_NR_perf_event_open (TARGET_NR_Linux + 333)
+#define TARGET_NR_accept4 (TARGET_NR_Linux + 334)
+#define TARGET_NR_recvmmsg (TARGET_NR_Linux + 335)
+#define TARGET_NR_fanotify_init (TARGET_NR_Linux + 336)
+#define TARGET_NR_fanotify_mark (TARGET_NR_Linux + 337)
+#define TARGET_NR_prlimit64 (TARGET_NR_Linux + 338)
+#define TARGET_NR_name_to_handle_at (TARGET_NR_Linux + 339)
+#define TARGET_NR_open_by_handle_at (TARGET_NR_Linux + 340)
+#define TARGET_NR_clock_adjtime (TARGET_NR_Linux + 341)
+#define TARGET_NR_syncfs (TARGET_NR_Linux + 342)
#define TARGET_NR_dup3 (TARGET_NR_Linux + 286)
#define TARGET_NR_pipe2 (TARGET_NR_Linux + 287)
#define TARGET_NR_inotify_init1 (TARGET_NR_Linux + 288)
+#define TARGET_NR_preadv (TARGET_NR_Linux + 289)
+#define TARGET_NR_pwritev (TARGET_NR_Linux + 290)
+#define TARGET_NR_rt_tgsigqueueinfo (TARGET_NR_Linux + 291)
+#define TARGET_NR_perf_event_open (TARGET_NR_Linux + 292)
+#define TARGET_NR_accept4 (TARGET_NR_Linux + 293)
+#define TARGET_NR_recvmmsg (TARGET_NR_Linux + 294)
+#define TARGET_NR_fanotify_init (TARGET_NR_Linux + 295)
+#define TARGET_NR_fanotify_mark (TARGET_NR_Linux + 296)
+#define TARGET_NR_prlimit64 (TARGET_NR_Linux + 297)
+#define TARGET_NR_name_to_handle_at (TARGET_NR_Linux + 298)
+#define TARGET_NR_open_by_handle_at (TARGET_NR_Linux + 299)
+#define TARGET_NR_clock_adjtime (TARGET_NR_Linux + 300)
+#define TARGET_NR_syncfs (TARGET_NR_Linux + 301)
#define TARGET_NR_dup3 (TARGET_NR_Linux + 290)
#define TARGET_NR_pipe2 (TARGET_NR_Linux + 291)
#define TARGET_NR_inotify_init1 (TARGET_NR_Linux + 292)
+#define TARGET_NR_preadv (TARGET_NR_Linux + 293)
+#define TARGET_NR_pwritev (TARGET_NR_Linux + 294)
+#define TARGET_NR_rt_tgsigqueueinfo (TARGET_NR_Linux + 295)
+#define TARGET_NR_perf_event_open (TARGET_NR_Linux + 296)
+#define TARGET_NR_accept4 (TARGET_NR_Linux + 297)
+#define TARGET_NR_recvmmsg (TARGET_NR_Linux + 298)
+#define TARGET_NR_getdents64 (TARGET_NR_Linux + 299)
+#define TARGET_NR_fanotify_init (TARGET_NR_Linux + 300)
+#define TARGET_NR_fanotify_mark (TARGET_NR_Linux + 301)
+#define TARGET_NR_prlimit64 (TARGET_NR_Linux + 302)
+#define TARGET_NR_name_to_handle_at (TARGET_NR_Linux + 303)
+#define TARGET_NR_open_by_handle_at (TARGET_NR_Linux + 304)
+#define TARGET_NR_clock_adjtime (TARGET_NR_Linux + 305)
+#define TARGET_NR_syncfs (TARGET_NR_Linux + 306)
#define TARGET_NR_dup3 316
#define TARGET_NR_pipe2 317
#define TARGET_NR_inotify_init1 318
+#define TARGET_NR_perf_event_open 319
+#define TARGET_NR_preadv 320
+#define TARGET_NR_pwritev 321
+#define TARGET_NR_rt_tgsigqueueinfo 322
+#define TARGET_NR_fanotify_init 323
+#define TARGET_NR_fanotify_mark 324
+#define TARGET_NR_prlimit64 325
+#define TARGET_NR_socket 326
+#define TARGET_NR_bind 327
+#define TARGET_NR_connect 328
+#define TARGET_NR_listen 329
+#define TARGET_NR_accept 330
+#define TARGET_NR_getsockname 331
+#define TARGET_NR_getpeername 332
+#define TARGET_NR_socketpair 333
+#define TARGET_NR_send 334
+#define TARGET_NR_sendto 335
+#define TARGET_NR_recv 336
+#define TARGET_NR_recvfrom 337
+#define TARGET_NR_shutdown 338
+#define TARGET_NR_setsockopt 339
+#define TARGET_NR_getsockopt 340
+#define TARGET_NR_sendmsg 341
+#define TARGET_NR_recvmsg 342
+#define TARGET_NR_recvmmsg 343
+#define TARGET_NR_accept4 344
+#define TARGET_NR_name_to_handle_at 345
+#define TARGET_NR_open_by_handle_at 346
+#define TARGET_NR_clock_adjtime 347
+#define TARGET_NR_syncfs 348
#define TARGET_NR_pipe2 325
#define TARGET_NR_dup3 326
#define TARGET_NR_epoll_create1 327
-#undef NR_syscalls
-#define NR_syscalls 328
+#define TARGET_NR_preadv 328
+#define TARGET_NR_pwritev 329
+#define TARGET_NR_rt_tgsigqueueinfo 330
+#define TARGET_NR_perf_event_open 331
+#define TARGET_NR_fanotify_init 332
+#define TARGET_NR_fanotify_mark 333
+#define TARGET_NR_prlimit64 334
+#define TARGET_NR_name_to_handle_at 335
+#define TARGET_NR_open_by_handle_at 336
+#define TARGET_NR_clock_adjtime 337
+#define TARGET_NR_syncfs 338
/*
* There are some system calls that are not present on 64 bit, some
#define TARGET_NR_clone 120
#define TARGET_NR_setdomainname 121
#define TARGET_NR_uname 122
-#define TARGET_NR_modify_ldt 123
+#define TARGET_NR_cacheflush 123
#define TARGET_NR_adjtimex 124
#define TARGET_NR_mprotect 125
#define TARGET_NR_sigprocmask 126
#define TARGET_NR_dup3 330
#define TARGET_NR_pipe2 331
#define TARGET_NR_inotify_init1 332
+#define TARGET_NR_preadv 333
+#define TARGET_NR_pwritev 334
+#define TARGET_NR_rt_tgsigqueueinfo 335
+#define TARGET_NR_perf_event_open 336
+#define TARGET_NR_fanotify_init 337
+#define TARGET_NR_fanotify_mark 338
+#define TARGET_NR_prlimit64 339
+
+/* Non-multiplexed socket family */
+#define TARGET_NR_socket 340
+#define TARGET_NR_bind 341
+#define TARGET_NR_connect 342
+#define TARGET_NR_listen 343
+#define TARGET_NR_accept 344
+#define TARGET_NR_getsockname 345
+#define TARGET_NR_getpeername 346
+#define TARGET_NR_socketpair 347
+#define TARGET_NR_send 348
+#define TARGET_NR_sendto 349
+#define TARGET_NR_recv 350
+#define TARGET_NR_recvfrom 351
+#define TARGET_NR_shutdown 352
+#define TARGET_NR_setsockopt 353
+#define TARGET_NR_getsockopt 354
+#define TARGET_NR_sendmsg 355
+#define TARGET_NR_recvmsg 356
+#define TARGET_NR_recvmmsg 357
+#define TARGET_NR_accept4 358
+#define TARGET_NR_name_to_handle_at 359
+#define TARGET_NR_open_by_handle_at 360
+#define TARGET_NR_clock_adjtime 361
+#define TARGET_NR_syncfs 362
} sigframe;
struct target_ucontext {
- target_ulong uc_flags;
- struct target_ucontext *uc_link;
- target_stack_t uc_stack;
- target_sigregs uc_mcontext;
- target_sigset_t uc_sigmask; /* mask last for extensibility */
+ target_ulong tuc_flags;
+ struct target_ucontext *tuc_link;
+ target_stack_t tuc_stack;
+ target_sigregs tuc_mcontext;
+ target_sigset_t tuc_sigmask; /* mask last for extensibility */
};
typedef struct {
}
/* Create the ucontext. */
- __put_user(0, &frame->uc.uc_flags);
- __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.uc_link);
- __put_user(target_sigaltstack_used.ss_sp, &frame->uc.uc_stack.ss_sp);
+ __put_user(0, &frame->uc.tuc_flags);
+ __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
+ __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
__put_user(sas_ss_flags(get_sp_from_cpustate(env)),
- &frame->uc.uc_stack.ss_flags);
- __put_user(target_sigaltstack_used.ss_size, &frame->uc.uc_stack.ss_size);
- save_sigregs(env, &frame->uc.uc_mcontext);
+ &frame->uc.tuc_stack.ss_flags);
+ __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
+ save_sigregs(env, &frame->uc.tuc_mcontext);
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user((abi_ulong)set->sig[i],
- (abi_ulong *)&frame->uc.uc_sigmask.sig[i]);
+ (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
}
/* Set up to return from userspace. If provided, use a stub
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
goto badframe;
}
- target_to_host_sigset(&set, &frame->uc.uc_sigmask);
+ target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
- if (restore_sigregs(env, &frame->uc.uc_mcontext)) {
+ if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
goto badframe;
}
- if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.uc_stack), 0,
+ if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
get_sp_from_cpustate(env)) == -EFAULT) {
goto badframe;
}
#define TARGET_NR_pipe2 321
#define TARGET_NR_inotify_init1 322
#define TARGET_NR_accept4 323
+#define TARGET_NR_preadv 324
+#define TARGET_NR_pwritev 325
+#define TARGET_NR_rt_tgsigqueueinfo 326
+#define TARGET_NR_perf_event_open 327
+#define TARGET_NR_recvmmsg 328
+#define TARGET_NR_fanotify_init 329
+#define TARGET_NR_fanotify_mark 330
+#define TARGET_NR_prlimit64 331
+#define TARGET_NR_name_to_handle_at 332
+#define TARGET_NR_open_by_handle_at 333
+#define TARGET_NR_clock_adjtime 334
+#define TARGET_NR_syncfs 335
#define TARGET_NR_pipe2 321
#define TARGET_NR_inotify_init1 322
#define TARGET_NR_accept4 323
+#define TARGET_NR_preadv 324
+#define TARGET_NR_pwritev 325
+#define TARGET_NR_rt_tgsigqueueinfo 326
+#define TARGET_NR_perf_event_open 327
+#define TARGET_NR_recvmmsg 328
+#define TARGET_NR_fanotify_init 329
+#define TARGET_NR_fanotify_mark 330
+#define TARGET_NR_prlimit64 331
+#define TARGET_NR_name_to_handle_at 332
+#define TARGET_NR_open_by_handle_at 333
+#define TARGET_NR_clock_adjtime 334
+#define TARGET_NR_syncfs 335
fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
+#if defined(TARGET_NR_prlimit64)
+#ifndef __NR_prlimit64
+# define __NR_prlimit64 -1
+#endif
+#define __NR_sys_prlimit64 __NR_prlimit64
+/* The glibc rlimit structure may not match the one used by the underlying syscall */
+struct host_rlimit64 {
+ uint64_t rlim_cur;
+ uint64_t rlim_max;
+};
+_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
+ const struct host_rlimit64 *, new_limit,
+ struct host_rlimit64 *, old_limit)
+#endif
+
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);
+/* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
+#ifdef TARGET_ARM
+static inline int regpairs_aligned(void *cpu_env) {
+    return ((((CPUARMState *)cpu_env)->eabi) == 1);
+}
+#elif defined(TARGET_MIPS)
+static inline int regpairs_aligned(void *cpu_env) { return 1; }
+#else
+static inline int regpairs_aligned(void *cpu_env) { return 0; }
+#endif
+
#define ERRNO_TABLE_SIZE 1200
/* target_to_host_errno_table[] is initialized from
static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
{
- if (target_rlim == TARGET_RLIM_INFINITY)
- return RLIM_INFINITY;
+ target_ulong target_rlim_swap;
+ rlim_t result;
+
+ target_rlim_swap = tswapl(target_rlim);
+ if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap)
+ result = RLIM_INFINITY;
else
- return tswapl(target_rlim);
+ result = target_rlim_swap;
+
+ return result;
}
static inline target_ulong host_to_target_rlim(rlim_t rlim)
{
+ target_ulong target_rlim_swap;
+ target_ulong result;
+
if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
- return TARGET_RLIM_INFINITY;
+ target_rlim_swap = TARGET_RLIM_INFINITY;
else
- return tswapl(rlim);
+ target_rlim_swap = rlim;
+ result = tswapl(target_rlim_swap);
+
+ return result;
+}
+
+static inline int target_to_host_resource(int code)
+{
+ switch (code) {
+ case TARGET_RLIMIT_AS:
+ return RLIMIT_AS;
+ case TARGET_RLIMIT_CORE:
+ return RLIMIT_CORE;
+ case TARGET_RLIMIT_CPU:
+ return RLIMIT_CPU;
+ case TARGET_RLIMIT_DATA:
+ return RLIMIT_DATA;
+ case TARGET_RLIMIT_FSIZE:
+ return RLIMIT_FSIZE;
+ case TARGET_RLIMIT_LOCKS:
+ return RLIMIT_LOCKS;
+ case TARGET_RLIMIT_MEMLOCK:
+ return RLIMIT_MEMLOCK;
+ case TARGET_RLIMIT_MSGQUEUE:
+ return RLIMIT_MSGQUEUE;
+ case TARGET_RLIMIT_NICE:
+ return RLIMIT_NICE;
+ case TARGET_RLIMIT_NOFILE:
+ return RLIMIT_NOFILE;
+ case TARGET_RLIMIT_NPROC:
+ return RLIMIT_NPROC;
+ case TARGET_RLIMIT_RSS:
+ return RLIMIT_RSS;
+ case TARGET_RLIMIT_RTPRIO:
+ return RLIMIT_RTPRIO;
+ case TARGET_RLIMIT_SIGPENDING:
+ return RLIMIT_SIGPENDING;
+ case TARGET_RLIMIT_STACK:
+ return RLIMIT_STACK;
+ default:
+ return code;
+ }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
abi_long arg3,
abi_long arg4)
{
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
- {
+ if (regpairs_aligned(cpu_env)) {
arg2 = arg3;
arg3 = arg4;
- }
-#endif
+ }
return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
abi_long arg3,
abi_long arg4)
{
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
- {
+ if (regpairs_aligned(cpu_env)) {
arg2 = arg3;
arg3 = arg4;
- }
-#endif
+ }
return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
break;
case TARGET_NR_setrlimit:
{
- int resource = arg1;
+ int resource = target_to_host_resource(arg1);
struct target_rlimit *target_rlim;
struct rlimit rlim;
if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
break;
case TARGET_NR_getrlimit:
{
- int resource = arg1;
+ int resource = target_to_host_resource(arg1);
struct target_rlimit *target_rlim;
struct rlimit rlim;
if (arg_sigset) {
sig.set = &set;
+ if (arg_sigsize != sizeof(*target_sigset)) {
+ /* Like the kernel, we enforce correct size sigsets */
+ ret = -TARGET_EINVAL;
+ goto fail;
+ }
target_sigset = lock_user(VERIFY_READ, arg_sigset,
sizeof(*target_sigset), 1);
if (!target_sigset) {
#endif
#ifdef TARGET_NR_pread
case TARGET_NR_pread:
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
+ if (regpairs_aligned(cpu_env))
arg4 = arg5;
-#endif
if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
goto efault;
ret = get_errno(pread(arg1, p, arg3, arg4));
unlock_user(p, arg2, ret);
break;
case TARGET_NR_pwrite:
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
+ if (regpairs_aligned(cpu_env))
arg4 = arg5;
-#endif
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
goto efault;
ret = get_errno(pwrite(arg1, p, arg3, arg4));
case TARGET_NR_ugetrlimit:
{
struct rlimit rlim;
- ret = get_errno(getrlimit(arg1, &rlim));
+ int resource = target_to_host_resource(arg1);
+ ret = get_errno(getrlimit(resource, &rlim));
if (!is_error(ret)) {
struct target_rlimit *target_rlim;
if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
#ifdef TARGET_NR_readahead
case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
- {
+ if (regpairs_aligned(cpu_env)) {
arg2 = arg3;
arg3 = arg4;
arg4 = arg5;
}
-#endif
ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
ret = get_errno(readahead(arg1, arg2, arg3));
break;
}
#endif
+#endif
+#ifdef TARGET_NR_prlimit64
+ case TARGET_NR_prlimit64:
+ {
+ /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
+ struct target_rlimit64 *target_rnew, *target_rold;
+ struct host_rlimit64 rnew, rold, *rnewp = 0;
+ if (arg3) {
+ if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
+ goto efault;
+ }
+ rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
+ rnew.rlim_max = tswap64(target_rnew->rlim_max);
+ unlock_user_struct(target_rnew, arg3, 0);
+ rnewp = &rnew;
+ }
+
+ ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
+ if (!is_error(ret) && arg4) {
+ if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
+ goto efault;
+ }
+ target_rold->rlim_cur = tswap64(rold.rlim_cur);
+ target_rold->rlim_max = tswap64(rold.rlim_max);
+ unlock_user_struct(target_rold, arg4, 1);
+ }
+ break;
+ }
#endif
default:
unimplemented:
#define TARGET_RLIM_INFINITY ((target_ulong)~0UL)
#endif
+#if defined(TARGET_MIPS)
+#define TARGET_RLIMIT_CPU 0
+#define TARGET_RLIMIT_FSIZE 1
+#define TARGET_RLIMIT_DATA 2
+#define TARGET_RLIMIT_STACK 3
+#define TARGET_RLIMIT_CORE 4
+#define TARGET_RLIMIT_RSS 7
+#define TARGET_RLIMIT_NPROC 8
+#define TARGET_RLIMIT_NOFILE 5
+#define TARGET_RLIMIT_MEMLOCK 9
+#define TARGET_RLIMIT_AS 6
+#define TARGET_RLIMIT_LOCKS 10
+#define TARGET_RLIMIT_SIGPENDING 11
+#define TARGET_RLIMIT_MSGQUEUE 12
+#define TARGET_RLIMIT_NICE 13
+#define TARGET_RLIMIT_RTPRIO 14
+#else
+#define TARGET_RLIMIT_CPU 0
+#define TARGET_RLIMIT_FSIZE 1
+#define TARGET_RLIMIT_DATA 2
+#define TARGET_RLIMIT_STACK 3
+#define TARGET_RLIMIT_CORE 4
+#define TARGET_RLIMIT_RSS 5
+#define TARGET_RLIMIT_NPROC 6
+#define TARGET_RLIMIT_NOFILE 7
+#define TARGET_RLIMIT_MEMLOCK 8
+#define TARGET_RLIMIT_AS 9
+#define TARGET_RLIMIT_LOCKS 10
+#define TARGET_RLIMIT_SIGPENDING 11
+#define TARGET_RLIMIT_MSGQUEUE 12
+#define TARGET_RLIMIT_NICE 13
+#define TARGET_RLIMIT_RTPRIO 14
+#endif
+
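
A quick worked example of why target_to_host_resource() is needed (values taken from the tables above; host values assume the generic Linux numbering used by most architectures):

    /* Sketch only, not part of the patch. */
    int guest_resource = 5;  /* TARGET_RLIMIT_NOFILE for a MIPS guest */
    int host_resource = target_to_host_resource(guest_resource);
    /*
     * host_resource is now the host's RLIMIT_NOFILE (7 in the generic
     * numbering); passing 5 through unchanged would have adjusted
     * RLIMIT_RSS instead.
     */
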
struct target_pollfd {
int fd; /* file descriptor */
short events; /* requested events */
#define TARGET_KDSKBMODE 0x4b45
#define TARGET_KDGKBENT 0x4B46 /* gets one entry in translation table */
#define TARGET_KDGKBSENT 0x4B48 /* gets one function key string entry */
+#define TARGET_KDGKBLED 0x4B64 /* get led flags (not lights) */
+#define TARGET_KDSKBLED 0x4B65 /* set led flags (not lights) */
+#define TARGET_KDGETLED 0x4B31 /* return current led state */
+#define TARGET_KDSETLED 0x4B32 /* set led state [lights, not flags] */
#define TARGET_SIOCATMARK 0x8905
#define TARGET_FBIOGET_VSCREENINFO 0x4600
#define TARGET_FBIOPUT_VSCREENINFO 0x4601
#define TARGET_FBIOGET_FSCREENINFO 0x4602
+#define TARGET_FBIOGETCMAP 0x4604
+#define TARGET_FBIOPUTCMAP 0x4605
+#define TARGET_FBIOPAN_DISPLAY 0x4606
+#define TARGET_FBIOGET_CON2FBMAP 0x460F
+#define TARGET_FBIOPUT_CON2FBMAP 0x4610
/* vt ioctls */
#define TARGET_VT_OPENQRY 0x5600
#define TARGET_VT_WAITACTIVE 0x5607
#define TARGET_VT_LOCKSWITCH 0x560b
#define TARGET_VT_UNLOCKSWITCH 0x560c
+#define TARGET_VT_GETMODE 0x5601
+#define TARGET_VT_SETMODE 0x5602
+#define TARGET_VT_RELDISP 0x5605
+#define TARGET_VT_DISALLOCATE 0x5608
/* from asm/termbits.h */
target_epoll_data_t data;
};
#endif
+struct target_rlimit64 {
+ uint64_t rlim_cur;
+ uint64_t rlim_max;
+};
TYPE_INT, /* rotate */
MK_ARRAY(TYPE_INT, 5)) /* reserved */
+STRUCT(fb_cmap,
+ TYPE_INT, /* start */
+ TYPE_INT, /* len */
+ TYPE_PTRVOID, /* red */
+ TYPE_PTRVOID, /* green */
+ TYPE_PTRVOID, /* blue */
+ TYPE_PTRVOID) /* transp */
+
+STRUCT(fb_con2fbmap,
+ TYPE_INT, /* console */
+ TYPE_INT) /* framebuffer */
+
STRUCT(vt_stat,
TYPE_SHORT, /* v_active */
TYPE_SHORT, /* v_signal */
TYPE_SHORT) /* v_state */
+STRUCT(vt_mode,
+ TYPE_CHAR, /* mode */
+ TYPE_CHAR, /* waitv */
+ TYPE_SHORT, /* relsig */
+ TYPE_SHORT, /* acqsig */
+ TYPE_SHORT) /* frsig */
+
STRUCT(fiemap_extent,
TYPE_ULONGLONG, /* fe_logical */
TYPE_ULONGLONG, /* fe_physical */
#define TARGET_NR_dup3 292
#define TARGET_NR_pipe2 293
#define TARGET_NR_inotify_init1 294
+#define TARGET_NR_preadv 295
+#define TARGET_NR_pwritev 296
+#define TARGET_NR_rt_tgsigqueueinfo 297
+#define TARGET_NR_perf_event_open 298
+#define TARGET_NR_recvmmsg 299
+#define TARGET_NR_fanotify_init 300
+#define TARGET_NR_fanotify_mark 301
+#define TARGET_NR_prlimit64 302
+#define TARGET_NR_name_to_handle_at 303
+#define TARGET_NR_open_by_handle_at 304
+#define TARGET_NR_clock_adjtime 305
+#define TARGET_NR_syncfs 306
- OpenBIOS (http://www.openbios.org/) is a free (GPL v2) portable
firmware implementation. The goal is to implement a 100% IEEE
1275-1994 (referred to as Open Firmware) compliant firmware.
- The included image for PowerPC (for 32 and 64 bit PPC CPUs),
-
- PowerPC is built from OpenBIOS SVN revision 1044
- Sparc32 and Sparc64 are built from OpenBIOS SVN revision 1018.
+ The included image for PowerPC (for 32 and 64 bit PPC CPUs)
+    is built from OpenBIOS SVN revision 1044; the Sparc32 and Sparc64
+    images are built from OpenBIOS SVN revision 1045.
- SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
implementation for certain IBM POWER hardware. The sources are at
#pragma GCC poison CPUState
#pragma GCC poison env
+#pragma GCC poison lduw_phys
+#pragma GCC poison ldl_phys
+#pragma GCC poison ldq_phys
+#pragma GCC poison stl_phys_notdirty
+#pragma GCC poison stq_phys_notdirty
+#pragma GCC poison stw_phys
+#pragma GCC poison stl_phys
+#pragma GCC poison stq_phys
+
#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
}
# , must have a space on the right.
+            # not required when a single },{ appears on one line
} elsif ($op eq ',') {
- if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
+ if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/ &&
+ ($elements[$n] . $elements[$n + 2]) !~ " *}{") {
ERROR("space required after that '$op' $at\n" . $hereptr);
}
}
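
For reference, the initializer style this checkpatch exception is meant to accept is the "},{" pattern used by the device info tables elsewhere in this series, e.g. (demo names are illustrative only):

    static PCIDeviceInfo demo_info[] = {
        {
            .qdev.name = "demo-a",
        },{   /* no space required after this ',' */
            .qdev.name = "demo-b",
        },{
            /* end of list */
        }
    };
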
if (!defined $suppress_ifbraces{$linenr - 1} &&
$line =~ /\b(if|while|for|else)\b/ &&
+ $line !~ /\#\s*if/ &&
$line !~ /\#\s*else/) {
my $allowed = 0;
void cpu_alpha_store_fpcr (CPUState *env, uint64_t val);
#ifndef CONFIG_USER_ONLY
void swap_shadow_regs(CPUState *env);
-extern QEMU_NORETURN void do_unassigned_access(target_phys_addr_t addr,
- int, int, int, int);
+QEMU_NORETURN void cpu_unassigned_access(CPUState *env1,
+ target_phys_addr_t addr, int is_write,
+ int is_exec, int unused, int size);
#endif
/* Bits in TB->FLAGS that control how translation is processed. */
helper_excp(EXCP_UNALIGN, 0);
}
-void QEMU_NORETURN do_unassigned_access(target_phys_addr_t addr, int is_write,
- int is_exec, int unused, int size)
+void QEMU_NORETURN cpu_unassigned_access(CPUState *env1,
+ target_phys_addr_t addr, int is_write,
+ int is_exec, int unused, int size)
{
+ env = env1;
env->trap_arg0 = addr;
env->trap_arg1 = is_write;
dynamic_excp(EXCP_MCHK, 0);
uint32_t c7_par; /* Translation result. */
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
+ uint32_t c9_pmcr; /* performance monitor control register */
+ uint32_t c9_pmcnten; /* perf monitor counter enables */
+ uint32_t c9_pmovsr; /* perf monitor overflow status */
+ uint32_t c9_pmxevtyper; /* perf monitor event type */
+ uint32_t c9_pmuserenr; /* perf monitor user enable */
+ uint32_t c9_pminten; /* perf monitor interrupt enables */
uint32_t c13_fcse; /* FCSE PID. */
uint32_t c13_context; /* Context ID. */
uint32_t c13_tls1; /* User RW Thread register. */
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
-#define CPU_SAVE_VERSION 3
+#define CPU_SAVE_VERSION 4
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
}
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
env->cp15.c2_base_mask = 0xffffc000u;
+ /* v7 performance monitor control register: same implementor
+ * field as main ID register, and we implement no event counters.
+ */
+ env->cp15.c9_pmcr = (id & 0xff000000);
#endif
set_flush_to_zero(1, &env->vfp.standard_fp_status);
set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
case 1: /* TCM memory region registers. */
/* Not implemented. */
goto bad_reg;
+ case 12: /* Performance monitor control */
+ /* Performance monitors are implementation defined in v7,
+ * but with an ARM recommended set of registers, which we
+ * follow (although we don't actually implement any counters)
+ */
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
+ goto bad_reg;
+ }
+ switch (op2) {
+ case 0: /* performance monitor control register */
+ /* only the DP, X, D and E bits are writable */
+ env->cp15.c9_pmcr &= ~0x39;
+ env->cp15.c9_pmcr |= (val & 0x39);
+ break;
+ case 1: /* Count enable set register */
+ val &= (1 << 31);
+ env->cp15.c9_pmcnten |= val;
+ break;
+ case 2: /* Count enable clear */
+ val &= (1 << 31);
+ env->cp15.c9_pmcnten &= ~val;
+ break;
+ case 3: /* Overflow flag status */
+ env->cp15.c9_pmovsr &= ~val;
+ break;
+ case 4: /* Software increment */
+ /* RAZ/WI since we don't implement the software-count event */
+ break;
+ case 5: /* Event counter selection register */
+ /* Since we don't implement any events, writing to this register
+ * is actually UNPREDICTABLE. So we choose to RAZ/WI.
+ */
+ break;
+ default:
+ goto bad_reg;
+ }
+ break;
+ case 13: /* Performance counters */
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
+ goto bad_reg;
+ }
+ switch (op2) {
+ case 0: /* Cycle count register: not implemented, so RAZ/WI */
+ break;
+ case 1: /* Event type select */
+ env->cp15.c9_pmxevtyper = val & 0xff;
+ break;
+ case 2: /* Event count register */
+ /* Unimplemented (we have no events), RAZ/WI */
+ break;
+ default:
+ goto bad_reg;
+ }
+ break;
+ case 14: /* Performance monitor control */
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
+ goto bad_reg;
+ }
+ switch (op2) {
+ case 0: /* user enable */
+ env->cp15.c9_pmuserenr = val & 1;
+ /* changes access rights for cp registers, so flush tbs */
+ tb_flush(env);
+ break;
+ case 1: /* interrupt enable set */
+ /* We have no event counters so only the C bit can be changed */
+ val &= (1 << 31);
+ env->cp15.c9_pminten |= val;
+ break;
+ case 2: /* interrupt enable clear */
+ val &= (1 << 31);
+ env->cp15.c9_pminten &= ~val;
+ break;
+ }
+ break;
default:
goto bad_reg;
}
return 0;
case 8: /* MMU TLB control. */
goto bad_reg;
- case 9: /* Cache lockdown. */
- switch (op1) {
- case 0: /* L1 cache. */
- if (arm_feature(env, ARM_FEATURE_OMAPCP))
- return 0;
+ case 9:
+ switch (crm) {
+ case 0: /* Cache lockdown */
+ switch (op1) {
+ case 0: /* L1 cache. */
+ if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
+ return 0;
+ }
+ switch (op2) {
+ case 0:
+ return env->cp15.c9_data;
+ case 1:
+ return env->cp15.c9_insn;
+ default:
+ goto bad_reg;
+ }
+ case 1: /* L2 cache */
+ if (crm != 0) {
+ goto bad_reg;
+ }
+ /* L2 Lockdown and Auxiliary control. */
+ return 0;
+ default:
+ goto bad_reg;
+ }
+ break;
+ case 12: /* Performance monitor control */
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
+ goto bad_reg;
+ }
switch (op2) {
- case 0:
- return env->cp15.c9_data;
- case 1:
- return env->cp15.c9_insn;
+ case 0: /* performance monitor control register */
+ return env->cp15.c9_pmcr;
+ case 1: /* count enable set */
+ case 2: /* count enable clear */
+ return env->cp15.c9_pmcnten;
+ case 3: /* overflow flag status */
+ return env->cp15.c9_pmovsr;
+ case 4: /* software increment */
+ case 5: /* event counter selection register */
+ return 0; /* Unimplemented, RAZ/WI */
default:
goto bad_reg;
}
- case 1: /* L2 cache */
- if (crm != 0)
+ case 13: /* Performance counters */
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
+ goto bad_reg;
+ }
+ switch (op2) {
+ case 1: /* Event type select */
+ return env->cp15.c9_pmxevtyper;
+ case 0: /* Cycle count register */
+ case 2: /* Event count register */
+ /* Unimplemented, so RAZ/WI */
+ return 0;
+ default:
+ goto bad_reg;
+ }
+ case 14: /* Performance monitor control */
+ if (!arm_feature(env, ARM_FEATURE_V7)) {
goto bad_reg;
- /* L2 Lockdown and Auxiliary control. */
- return 0;
+ }
+ switch (op2) {
+ case 0: /* user enable */
+ return env->cp15.c9_pmuserenr;
+ case 1: /* interrupt enable set */
+ case 2: /* interrupt enable clear */
+ return env->cp15.c9_pminten;
+ default:
+ goto bad_reg;
+ }
default:
goto bad_reg;
}
+ break;
case 10: /* MMU TLB lockdown. */
/* ??? TLB lockdown not implemented. */
return 0;
return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
case 16: /* PRIMASK */
return (env->uncached_cpsr & CPSR_I) != 0;
- case 17: /* FAULTMASK */
- return (env->uncached_cpsr & CPSR_F) != 0;
- case 18: /* BASEPRI */
- case 19: /* BASEPRI_MAX */
+ case 17: /* BASEPRI */
+ case 18: /* BASEPRI_MAX */
return env->v7m.basepri;
+ case 19: /* FAULTMASK */
+ return (env->uncached_cpsr & CPSR_F) != 0;
case 20: /* CONTROL */
return env->v7m.control;
default:
else
env->uncached_cpsr &= ~CPSR_I;
break;
- case 17: /* FAULTMASK */
- if (val & 1)
- env->uncached_cpsr |= CPSR_F;
- else
- env->uncached_cpsr &= ~CPSR_F;
- break;
- case 18: /* BASEPRI */
+ case 17: /* BASEPRI */
env->v7m.basepri = val & 0xff;
break;
- case 19: /* BASEPRI_MAX */
+ case 18: /* BASEPRI_MAX */
val &= 0xff;
if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
env->v7m.basepri = val;
break;
+ case 19: /* FAULTMASK */
+ if (val & 1)
+ env->uncached_cpsr |= CPSR_F;
+ else
+ env->uncached_cpsr &= ~CPSR_F;
+ break;
case 20: /* CONTROL */
env->v7m.control = val & 3;
switch_v7m_sp(env, (val & 2) != 0);
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
#define VFP_BINOP(name) \
-float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
+float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
- return float32_ ## name (a, b, &env->vfp.fp_status); \
+ float_status *fpst = fpstp; \
+ return float32_ ## name(a, b, fpst); \
} \
-float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
+float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
- return float64_ ## name (a, b, &env->vfp.fp_status); \
+ float_status *fpst = fpstp; \
+ return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
DEF_HELPER_1(vfp_get_fpscr, i32, env)
DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
-DEF_HELPER_3(vfp_adds, f32, f32, f32, env)
-DEF_HELPER_3(vfp_addd, f64, f64, f64, env)
-DEF_HELPER_3(vfp_subs, f32, f32, f32, env)
-DEF_HELPER_3(vfp_subd, f64, f64, f64, env)
-DEF_HELPER_3(vfp_muls, f32, f32, f32, env)
-DEF_HELPER_3(vfp_muld, f64, f64, f64, env)
-DEF_HELPER_3(vfp_divs, f32, f32, f32, env)
-DEF_HELPER_3(vfp_divd, f64, f64, f64, env)
+DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
DEF_HELPER_1(vfp_negs, f32, f32)
DEF_HELPER_1(vfp_negd, f64, f64)
DEF_HELPER_1(vfp_abss, f32, f32)
DEF_HELPER_2(ror_cc, i32, i32, i32)
/* neon_helper.c */
-DEF_HELPER_2(neon_qadd_u8, i32, i32, i32)
-DEF_HELPER_2(neon_qadd_s8, i32, i32, i32)
-DEF_HELPER_2(neon_qadd_u16, i32, i32, i32)
-DEF_HELPER_2(neon_qadd_s16, i32, i32, i32)
-DEF_HELPER_2(neon_qadd_u32, i32, i32, i32)
-DEF_HELPER_2(neon_qadd_s32, i32, i32, i32)
-DEF_HELPER_2(neon_qsub_u8, i32, i32, i32)
-DEF_HELPER_2(neon_qsub_s8, i32, i32, i32)
-DEF_HELPER_2(neon_qsub_u16, i32, i32, i32)
-DEF_HELPER_2(neon_qsub_s16, i32, i32, i32)
-DEF_HELPER_2(neon_qsub_u32, i32, i32, i32)
-DEF_HELPER_2(neon_qsub_s32, i32, i32, i32)
-DEF_HELPER_2(neon_qadd_u64, i64, i64, i64)
-DEF_HELPER_2(neon_qadd_s64, i64, i64, i64)
-DEF_HELPER_2(neon_qsub_u64, i64, i64, i64)
-DEF_HELPER_2(neon_qsub_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64)
DEF_HELPER_2(neon_hadd_s8, i32, i32, i32)
DEF_HELPER_2(neon_hadd_u8, i32, i32, i32)
DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
-DEF_HELPER_2(neon_qshl_u8, i32, i32, i32)
-DEF_HELPER_2(neon_qshl_s8, i32, i32, i32)
-DEF_HELPER_2(neon_qshl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_qshl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_qshl_u32, i32, i32, i32)
-DEF_HELPER_2(neon_qshl_s32, i32, i32, i32)
-DEF_HELPER_2(neon_qshl_u64, i64, i64, i64)
-DEF_HELPER_2(neon_qshl_s64, i64, i64, i64)
-DEF_HELPER_2(neon_qshlu_s8, i32, i32, i32);
-DEF_HELPER_2(neon_qshlu_s16, i32, i32, i32);
-DEF_HELPER_2(neon_qshlu_s32, i32, i32, i32);
-DEF_HELPER_2(neon_qshlu_s64, i64, i64, i64);
-DEF_HELPER_2(neon_qrshl_u8, i32, i32, i32)
-DEF_HELPER_2(neon_qrshl_s8, i32, i32, i32)
-DEF_HELPER_2(neon_qrshl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_qrshl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_qrshl_u32, i32, i32, i32)
-DEF_HELPER_2(neon_qrshl_s32, i32, i32, i32)
-DEF_HELPER_2(neon_qrshl_u64, i64, i64, i64)
-DEF_HELPER_2(neon_qrshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32);
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32);
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32);
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64);
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
DEF_HELPER_2(neon_add_u8, i32, i32, i32)
DEF_HELPER_2(neon_add_u16, i32, i32, i32)
DEF_HELPER_1(neon_cls_s32, i32, i32)
DEF_HELPER_1(neon_cnt_u8, i32, i32)
-DEF_HELPER_2(neon_qdmulh_s16, i32, i32, i32)
-DEF_HELPER_2(neon_qrdmulh_s16, i32, i32, i32)
-DEF_HELPER_2(neon_qdmulh_s32, i32, i32, i32)
-DEF_HELPER_2(neon_qrdmulh_s32, i32, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
DEF_HELPER_1(neon_narrow_u8, i32, i64)
DEF_HELPER_1(neon_narrow_u16, i32, i64)
-DEF_HELPER_1(neon_unarrow_sat8, i32, i64)
-DEF_HELPER_1(neon_narrow_sat_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_sat_s8, i32, i64)
-DEF_HELPER_1(neon_unarrow_sat16, i32, i64)
-DEF_HELPER_1(neon_narrow_sat_u16, i32, i64)
-DEF_HELPER_1(neon_narrow_sat_s16, i32, i64)
-DEF_HELPER_1(neon_unarrow_sat32, i32, i64)
-DEF_HELPER_1(neon_narrow_sat_u32, i32, i64)
-DEF_HELPER_1(neon_narrow_sat_s32, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
DEF_HELPER_2(neon_subl_u16, i64, i64, i64)
DEF_HELPER_2(neon_subl_u32, i64, i64, i64)
-DEF_HELPER_2(neon_addl_saturate_s32, i64, i64, i64)
-DEF_HELPER_2(neon_addl_saturate_s64, i64, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
DEF_HELPER_1(neon_negl_u32, i64, i64)
DEF_HELPER_1(neon_negl_u64, i64, i64)
-DEF_HELPER_1(neon_qabs_s8, i32, i32)
-DEF_HELPER_1(neon_qabs_s16, i32, i32)
-DEF_HELPER_1(neon_qabs_s32, i32, i32)
-DEF_HELPER_1(neon_qneg_s8, i32, i32)
-DEF_HELPER_1(neon_qneg_s16, i32, i32)
-DEF_HELPER_1(neon_qneg_s32, i32, i32)
-
-DEF_HELPER_2(neon_min_f32, i32, i32, i32)
-DEF_HELPER_2(neon_max_f32, i32, i32, i32)
-DEF_HELPER_2(neon_abd_f32, i32, i32, i32)
-DEF_HELPER_2(neon_add_f32, i32, i32, i32)
-DEF_HELPER_2(neon_sub_f32, i32, i32, i32)
-DEF_HELPER_2(neon_mul_f32, i32, i32, i32)
-DEF_HELPER_2(neon_ceq_f32, i32, i32, i32)
-DEF_HELPER_2(neon_cge_f32, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_f32, i32, i32, i32)
-DEF_HELPER_2(neon_acge_f32, i32, i32, i32)
-DEF_HELPER_2(neon_acgt_f32, i32, i32, i32)
+DEF_HELPER_2(neon_qabs_s8, i32, env, i32)
+DEF_HELPER_2(neon_qabs_s16, i32, env, i32)
+DEF_HELPER_2(neon_qabs_s32, i32, env, i32)
+DEF_HELPER_2(neon_qneg_s8, i32, env, i32)
+DEF_HELPER_2(neon_qneg_s16, i32, env, i32)
+DEF_HELPER_2(neon_qneg_s32, i32, env, i32)
+
+DEF_HELPER_3(neon_min_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_max_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr)
/* iwmmxt_helper.c */
DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
-#define DEF_IWMMXT_HELPER_SIZE(name) \
-DEF_HELPER_2(iwmmxt_##name##b, i64, i64, i64) \
-DEF_HELPER_2(iwmmxt_##name##w, i64, i64, i64) \
-DEF_HELPER_2(iwmmxt_##name##l, i64, i64, i64) \
-
-DEF_IWMMXT_HELPER_SIZE(unpackl)
-DEF_IWMMXT_HELPER_SIZE(unpackh)
-
-DEF_HELPER_1(iwmmxt_unpacklub, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackluw, i64, i64)
-DEF_HELPER_1(iwmmxt_unpacklul, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackhub, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackhuw, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackhul, i64, i64)
-DEF_HELPER_1(iwmmxt_unpacklsb, i64, i64)
-DEF_HELPER_1(iwmmxt_unpacklsw, i64, i64)
-DEF_HELPER_1(iwmmxt_unpacklsl, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackhsb, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackhsw, i64, i64)
-DEF_HELPER_1(iwmmxt_unpackhsl, i64, i64)
-
-DEF_IWMMXT_HELPER_SIZE(cmpeq)
-DEF_IWMMXT_HELPER_SIZE(cmpgtu)
-DEF_IWMMXT_HELPER_SIZE(cmpgts)
-
-DEF_IWMMXT_HELPER_SIZE(mins)
-DEF_IWMMXT_HELPER_SIZE(minu)
-DEF_IWMMXT_HELPER_SIZE(maxs)
-DEF_IWMMXT_HELPER_SIZE(maxu)
-
-DEF_IWMMXT_HELPER_SIZE(subn)
-DEF_IWMMXT_HELPER_SIZE(addn)
-DEF_IWMMXT_HELPER_SIZE(subu)
-DEF_IWMMXT_HELPER_SIZE(addu)
-DEF_IWMMXT_HELPER_SIZE(subs)
-DEF_IWMMXT_HELPER_SIZE(adds)
-
-DEF_HELPER_2(iwmmxt_avgb0, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_avgb1, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_avgw0, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_avgw1, i64, i64, i64)
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
DEF_HELPER_2(iwmmxt_msadb, i64, i64, i64)
DEF_HELPER_1(iwmmxt_msbw, i32, i64)
DEF_HELPER_1(iwmmxt_msbl, i32, i64)
-DEF_HELPER_2(iwmmxt_srlw, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_srll, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_srlq, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_sllw, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_slll, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_sllq, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_sraw, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_sral, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_sraq, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_rorw, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_rorl, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_rorq, i64, i64, i32)
-DEF_HELPER_2(iwmmxt_shufh, i64, i64, i32)
-
-DEF_HELPER_2(iwmmxt_packuw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_packul, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_packuq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_packsw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_packsl, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_packsq, i64, i64, i64)
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
DEF_HELPER_2(set_teecr, void, env, i32)
-DEF_HELPER_2(neon_unzip8, void, i32, i32)
-DEF_HELPER_2(neon_unzip16, void, i32, i32)
-DEF_HELPER_2(neon_qunzip8, void, i32, i32)
-DEF_HELPER_2(neon_qunzip16, void, i32, i32)
-DEF_HELPER_2(neon_qunzip32, void, i32, i32)
-DEF_HELPER_2(neon_zip8, void, i32, i32)
-DEF_HELPER_2(neon_zip16, void, i32, i32)
-DEF_HELPER_2(neon_qzip8, void, i32, i32)
-DEF_HELPER_2(neon_qzip16, void, i32, i32)
-DEF_HELPER_2(neon_qzip32, void, i32, i32)
+DEF_HELPER_3(neon_unzip8, void, env, i32, i32)
+DEF_HELPER_3(neon_unzip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qunzip8, void, env, i32, i32)
+DEF_HELPER_3(neon_qunzip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qunzip32, void, env, i32, i32)
+DEF_HELPER_3(neon_zip8, void, env, i32, i32)
+DEF_HELPER_3(neon_zip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qzip8, void, env, i32, i32)
+DEF_HELPER_3(neon_qzip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qzip32, void, env, i32, i32)
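The zip/unzip helpers likewise gain an "env" argument because they read and write env->vfp.regs[] directly, as their definitions later in this patch show; the translator therefore passes cpu_env as the first call argument. Sketch of the resulting prototype and the matching translate-time call:

void helper_neon_qzip32(CPUState *env, uint32_t rd, uint32_t rm);  /* operates on env->vfp.regs[] */
/* ... and in translate.c: */
gen_helper_neon_qzip32(cpu_env, tmp, tmp2);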
#include "def-helper.h"
#include <stdio.h>
#include "cpu.h"
-#include "exec.h"
+#include "exec-all.h"
#include "helper.h"
/* iwMMXt macros extracted from GNU gdb. */
SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(uint64_t a, uint64_t b) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
{ \
a = \
(((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
return a; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(uint64_t a, uint64_t b) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
{ \
a = \
(((a >> SH0) & 0xffff) << 0) | \
NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \
return a; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(uint64_t a, uint64_t b) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
{ \
a = \
(((a >> SH0) & 0xffffffff) << 0) | \
NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
return a; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(uint64_t x) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUState *env, \
+ uint64_t x) \
{ \
x = \
(((x >> SH0) & 0xff) << 0) | \
NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
return x; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(uint64_t x) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUState *env, \
+ uint64_t x) \
{ \
x = \
(((x >> SH0) & 0xffff) << 0) | \
NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
return x; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(uint64_t x) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUState *env, \
+ uint64_t x) \
{ \
x = (((x >> SH0) & 0xffffffff) << 0); \
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
return x; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(uint64_t x) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUState *env, \
+ uint64_t x) \
{ \
x = \
((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
return x; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(uint64_t x) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUState *env, \
+ uint64_t x) \
{ \
x = \
((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
return x; \
} \
-uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(uint64_t x) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUState *env, \
+ uint64_t x) \
{ \
x = EXTEND32((x >> SH0) & 0xffffffff); \
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
-uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(uint64_t a, uint64_t b) \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
{ \
a = \
CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
return a; \
} \
-uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(uint64_t a, uint64_t b) \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
{ \
a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
return a; \
} \
-uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(uint64_t a, uint64_t b) \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
{ \
a = CMP(0, Tl, O, 0xffffffff) | \
CMP(32, Tl, O, 0xffffffff); \
#define AVGB(SHR) ((( \
((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR)
#define IWMMXT_OP_AVGB(r) \
-uint64_t HELPER(iwmmxt_avgb##r)(uint64_t a, uint64_t b) \
+uint64_t HELPER(iwmmxt_avgb##r)(CPUState *env, uint64_t a, uint64_t b) \
{ \
const int round = r; \
a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \
#define AVGW(SHR) ((( \
((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR)
#define IWMMXT_OP_AVGW(r) \
-uint64_t HELPER(iwmmxt_avgw##r)(uint64_t a, uint64_t b) \
+uint64_t HELPER(iwmmxt_avgw##r)(CPUState *env, uint64_t a, uint64_t b) \
{ \
const int round = r; \
a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \
}
/* FIXME: Split wCASF setting into a separate op to avoid env use. */
-uint64_t HELPER(iwmmxt_srlw)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_srlw)(CPUState *env, uint64_t x, uint32_t n)
{
x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) |
(((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) |
return x;
}
-uint64_t HELPER(iwmmxt_srll)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_srll)(CPUState *env, uint64_t x, uint32_t n)
{
x = ((x & (0xffffffffll << 0)) >> n) |
((x >> n) & (0xffffffffll << 32));
return x;
}
-uint64_t HELPER(iwmmxt_srlq)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_srlq)(CPUState *env, uint64_t x, uint32_t n)
{
x >>= n;
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
return x;
}
-uint64_t HELPER(iwmmxt_sllw)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_sllw)(CPUState *env, uint64_t x, uint32_t n)
{
x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) |
(((x & (0xffffll << 16)) << n) & (0xffffll << 16)) |
return x;
}
-uint64_t HELPER(iwmmxt_slll)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_slll)(CPUState *env, uint64_t x, uint32_t n)
{
x = ((x << n) & (0xffffffffll << 0)) |
((x & (0xffffffffll << 32)) << n);
return x;
}
-uint64_t HELPER(iwmmxt_sllq)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_sllq)(CPUState *env, uint64_t x, uint32_t n)
{
x <<= n;
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
return x;
}
-uint64_t HELPER(iwmmxt_sraw)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_sraw)(CPUState *env, uint64_t x, uint32_t n)
{
x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) |
((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) |
return x;
}
-uint64_t HELPER(iwmmxt_sral)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_sral)(CPUState *env, uint64_t x, uint32_t n)
{
x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) |
(((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32);
return x;
}
-uint64_t HELPER(iwmmxt_sraq)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_sraq)(CPUState *env, uint64_t x, uint32_t n)
{
x = (int64_t) x >> n;
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
return x;
}
-uint64_t HELPER(iwmmxt_rorw)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_rorw)(CPUState *env, uint64_t x, uint32_t n)
{
x = ((((x & (0xffffll << 0)) >> n) |
((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) |
return x;
}
-uint64_t HELPER(iwmmxt_rorl)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_rorl)(CPUState *env, uint64_t x, uint32_t n)
{
x = ((x & (0xffffffffll << 0)) >> n) |
((x >> n) & (0xffffffffll << 32)) |
return x;
}
-uint64_t HELPER(iwmmxt_rorq)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_rorq)(CPUState *env, uint64_t x, uint32_t n)
{
x = (x >> n) | (x << (64 - n));
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
return x;
}
-uint64_t HELPER(iwmmxt_shufh)(uint64_t x, uint32_t n)
+uint64_t HELPER(iwmmxt_shufh)(CPUState *env, uint64_t x, uint32_t n)
{
x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) |
(((x >> ((n << 2) & 0x30)) & 0xffff) << 16) |
}
/* TODO: Unsigned-Saturation */
-uint64_t HELPER(iwmmxt_packuw)(uint64_t a, uint64_t b)
+uint64_t HELPER(iwmmxt_packuw)(CPUState *env, uint64_t a, uint64_t b)
{
a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
(((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
return a;
}
-uint64_t HELPER(iwmmxt_packul)(uint64_t a, uint64_t b)
+uint64_t HELPER(iwmmxt_packul)(CPUState *env, uint64_t a, uint64_t b)
{
a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
(((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
return a;
}
-uint64_t HELPER(iwmmxt_packuq)(uint64_t a, uint64_t b)
+uint64_t HELPER(iwmmxt_packuq)(CPUState *env, uint64_t a, uint64_t b)
{
a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
}
/* TODO: Signed-Saturation */
-uint64_t HELPER(iwmmxt_packsw)(uint64_t a, uint64_t b)
+uint64_t HELPER(iwmmxt_packsw)(CPUState *env, uint64_t a, uint64_t b)
{
a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
(((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
return a;
}
-uint64_t HELPER(iwmmxt_packsl)(uint64_t a, uint64_t b)
+uint64_t HELPER(iwmmxt_packsl)(CPUState *env, uint64_t a, uint64_t b)
{
a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
(((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
return a;
}
-uint64_t HELPER(iwmmxt_packsq)(uint64_t a, uint64_t b)
+uint64_t HELPER(iwmmxt_packsq)(CPUState *env, uint64_t a, uint64_t b)
{
a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
qemu_put_be32(f, env->cp15.c7_par);
qemu_put_be32(f, env->cp15.c9_insn);
qemu_put_be32(f, env->cp15.c9_data);
+ qemu_put_be32(f, env->cp15.c9_pmcr);
+ qemu_put_be32(f, env->cp15.c9_pmcnten);
+ qemu_put_be32(f, env->cp15.c9_pmovsr);
+ qemu_put_be32(f, env->cp15.c9_pmxevtyper);
+ qemu_put_be32(f, env->cp15.c9_pmuserenr);
+ qemu_put_be32(f, env->cp15.c9_pminten);
qemu_put_be32(f, env->cp15.c13_fcse);
qemu_put_be32(f, env->cp15.c13_context);
qemu_put_be32(f, env->cp15.c13_tls1);
env->cp15.c7_par = qemu_get_be32(f);
env->cp15.c9_insn = qemu_get_be32(f);
env->cp15.c9_data = qemu_get_be32(f);
+ env->cp15.c9_pmcr = qemu_get_be32(f);
+ env->cp15.c9_pmcnten = qemu_get_be32(f);
+ env->cp15.c9_pmovsr = qemu_get_be32(f);
+ env->cp15.c9_pmxevtyper = qemu_get_be32(f);
+ env->cp15.c9_pmuserenr = qemu_get_be32(f);
+ env->cp15.c9_pminten = qemu_get_be32(f);
env->cp15.c13_fcse = qemu_get_be32(f);
env->cp15.c13_context = qemu_get_be32(f);
env->cp15.c13_tls1 = qemu_get_be32(f);
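The six new c9_* fields are v7 performance-monitor state and are written and read back in the same order, keeping the saved machine state symmetric. A sketch of how these fields might be declared (the names come from the accesses above; the mapping to PMU register names in the comments is an assumption):

struct cp15_pmu_state_example {
    uint32_t c9_pmcr;        /* PMCR: performance monitor control */
    uint32_t c9_pmcnten;     /* PMCNTENSET/CLR: counter enable state */
    uint32_t c9_pmovsr;      /* PMOVSR: counter overflow flags */
    uint32_t c9_pmxevtyper;  /* PMXEVTYPER: selected event type */
    uint32_t c9_pmuserenr;   /* PMUSERENR: user-mode access enable */
    uint32_t c9_pminten;     /* PMINTENSET/CLR: overflow interrupt enables */
};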
#include <stdio.h>
#include "cpu.h"
-#include "exec.h"
+#include "exec-all.h"
#include "helper.h"
#define SIGNBIT (uint32_t)0x80000000
#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
-#define NFS (&env->vfp.standard_fp_status)
-
#define NEON_TYPE1(name, type) \
typedef struct \
{ \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
+#define NEON_VOP_ENV(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
/* Pairwise operations. */
/* For 32-bit elements each segment only contains a single element, so
the elementwise and pairwise operations are the same. */
dest = tmp; \
}} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP(qadd_u8, neon_u8, 4)
+NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP(qadd_u16, neon_u16, 2)
+NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
-uint32_t HELPER(neon_qadd_u32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (res < a) {
return res;
}
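The pattern above is why these helpers need env: when an operation saturates, the helper must record the event in the cumulative Q flag, and SET_QC() writes env->vfp.xregs[ARM_VFP_FPSCR]. A minimal sketch of the shape shared by the saturating helpers (illustrative only, with a hypothetical name; the real helpers follow the same structure):

uint32_t example_qadd_u32(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {      /* unsigned overflow */
        SET_QC();       /* record sticky saturation in FPSCR.Q */
        res = ~0;       /* saturate to the maximum value */
    }
    return res;
}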
-uint64_t HELPER(neon_qadd_u64)(uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qadd_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
uint64_t res;
dest = tmp; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP(qadd_s8, neon_s8, 4)
+NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP(qadd_s16, neon_s16, 2)
+NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
-uint32_t HELPER(neon_qadd_s32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
return res;
}
-uint64_t HELPER(neon_qadd_s64)(uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qadd_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
uint64_t res;
dest = tmp; \
}} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP(qsub_u8, neon_u8, 4)
+NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP(qsub_u16, neon_u16, 2)
+NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
-uint32_t HELPER(neon_qsub_u32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (res > a) {
return res;
}
-uint64_t HELPER(neon_qsub_u64)(uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qsub_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
uint64_t res;
dest = tmp; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP(qsub_s8, neon_s8, 4)
+NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP(qsub_s16, neon_s16, 2)
+NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
-uint32_t HELPER(neon_qsub_s32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
return res;
}
-uint64_t HELPER(neon_qsub_s64)(uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qsub_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
uint64_t res;
dest = ~0; \
} \
}} while (0)
-NEON_VOP(qshl_u8, neon_u8, 4)
-NEON_VOP(qshl_u16, neon_u16, 2)
-NEON_VOP(qshl_u32, neon_u32, 1)
+NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN
-uint64_t HELPER(neon_qshl_u64)(uint64_t val, uint64_t shiftop)
+uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64) {
} \
} \
}} while (0)
-NEON_VOP(qshl_s8, neon_s8, 4)
-NEON_VOP(qshl_s16, neon_s16, 2)
-NEON_VOP(qshl_s32, neon_s32, 1)
+NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN
-uint64_t HELPER(neon_qshl_s64)(uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
int64_t val = valop;
} \
} \
}} while (0)
-NEON_VOP(qshlu_s8, neon_u8, 4)
-NEON_VOP(qshlu_s16, neon_u16, 2)
+NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
+NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN
-uint32_t HELPER(neon_qshlu_s32)(uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_qshlu_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
{
if ((int32_t)valop < 0) {
SET_QC();
return 0;
}
- return helper_neon_qshl_u32(valop, shiftop);
+ return helper_neon_qshl_u32(env, valop, shiftop);
}
-uint64_t HELPER(neon_qshlu_s64)(uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
if ((int64_t)valop < 0) {
SET_QC();
return 0;
}
- return helper_neon_qshl_u64(valop, shiftop);
+ return helper_neon_qshl_u64(env, valop, shiftop);
}
/* FIXME: This is wrong. */
dest = ~0; \
} \
}} while (0)
-NEON_VOP(qrshl_u8, neon_u8, 4)
-NEON_VOP(qrshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64-bit accumulator. */
-uint32_t HELPER(neon_qrshl_u32)(uint32_t val, uint32_t shiftop)
+uint32_t HELPER(neon_qrshl_u32)(CPUState *env, uint32_t val, uint32_t shiftop)
{
uint32_t dest;
int8_t shift = (int8_t)shiftop;
/* Handling addition overflow with 64-bit input values is trickier
 * than with 32-bit values. */
-uint64_t HELPER(neon_qrshl_u64)(uint64_t val, uint64_t shiftop)
+uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64) {
} \
} \
}} while (0)
-NEON_VOP(qrshl_s8, neon_s8, 4)
-NEON_VOP(qrshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64-bit accumulator. */
-uint32_t HELPER(neon_qrshl_s32)(uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_qrshl_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
{
int32_t dest;
int32_t val = (int32_t)valop;
/* Handling addition overflow with 64-bit input values is trickier
 * than with 32-bit values. */
-uint64_t HELPER(neon_qrshl_s64)(uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
int64_t val = valop;
dest = tmp >> 16; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
-NEON_VOP(qdmulh_s16, neon_s16, 2)
+NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
-NEON_VOP(qrdmulh_s16, neon_s16, 2)
+NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16
dest = tmp >> 32; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
-NEON_VOP(qdmulh_s32, neon_s32, 1)
+NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
-NEON_VOP(qrdmulh_s32, neon_s32, 1)
+NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
-uint32_t HELPER(neon_unarrow_sat8)(uint64_t x)
+uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
{
uint16_t s;
uint8_t d;
return res;
}
-uint32_t HELPER(neon_narrow_sat_u8)(uint64_t x)
+uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
uint16_t s;
uint8_t d;
return res;
}
-uint32_t HELPER(neon_narrow_sat_s8)(uint64_t x)
+uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
{
int16_t s;
uint8_t d;
return res;
}
-uint32_t HELPER(neon_unarrow_sat16)(uint64_t x)
+uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
{
uint32_t high;
uint32_t low;
return low | (high << 16);
}
-uint32_t HELPER(neon_narrow_sat_u16)(uint64_t x)
+uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
uint32_t high;
uint32_t low;
return low | (high << 16);
}
-uint32_t HELPER(neon_narrow_sat_s16)(uint64_t x)
+uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
{
int32_t low;
int32_t high;
return (uint16_t)low | (high << 16);
}
-uint32_t HELPER(neon_unarrow_sat32)(uint64_t x)
+uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
{
if (x & 0x8000000000000000ull) {
SET_QC();
return x;
}
-uint32_t HELPER(neon_narrow_sat_u32)(uint64_t x)
+uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
if (x > 0xffffffffu) {
SET_QC();
return x;
}
-uint32_t HELPER(neon_narrow_sat_s32)(uint64_t x)
+uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
{
if ((int64_t)x != (int32_t)x) {
SET_QC();
return (a - b) ^ mask;
}
-uint64_t HELPER(neon_addl_saturate_s32)(uint64_t a, uint64_t b)
+uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
{
uint32_t x, y;
uint32_t low, high;
return low | ((uint64_t)high << 32);
}
-uint64_t HELPER(neon_addl_saturate_s64)(uint64_t a, uint64_t b)
+uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
{
uint64_t result;
} else if (x < 0) { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qabs_s8)(uint32_t x)
+uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
{
neon_s8 vec;
NEON_UNPACK(neon_s8, vec, x);
} else { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qneg_s8)(uint32_t x)
+uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
{
neon_s8 vec;
NEON_UNPACK(neon_s8, vec, x);
} else if (x < 0) { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qabs_s16)(uint32_t x)
+uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
{
neon_s16 vec;
NEON_UNPACK(neon_s16, vec, x);
} else { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qneg_s16)(uint32_t x)
+uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
{
neon_s16 vec;
NEON_UNPACK(neon_s16, vec, x);
}
#undef DO_QNEG16
-uint32_t HELPER(neon_qabs_s32)(uint32_t x)
+uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
{
if (x == SIGNBIT) {
SET_QC();
return x;
}
-uint32_t HELPER(neon_qneg_s32)(uint32_t x)
+uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
{
if (x == SIGNBIT) {
SET_QC();
}
/* NEON Float helpers. */
-uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp)
{
- return float32_val(float32_min(make_float32(a), make_float32(b), NFS));
+ float_status *fpst = fpstp;
+ return float32_val(float32_min(make_float32(a), make_float32(b), fpst));
}
-uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp)
{
- return float32_val(float32_max(make_float32(a), make_float32(b), NFS));
+ float_status *fpst = fpstp;
+ return float32_val(float32_max(make_float32(a), make_float32(b), fpst));
}
-uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
+ float_status *fpst = fpstp;
float32 f0 = make_float32(a);
float32 f1 = make_float32(b);
- return float32_val(float32_abs(float32_sub(f0, f1, NFS)));
-}
-
-uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
-{
- return float32_val(float32_add(make_float32(a), make_float32(b), NFS));
-}
-
-uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
-{
- return float32_val(float32_sub(make_float32(a), make_float32(b), NFS));
-}
-
-uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
-{
- return float32_val(float32_mul(make_float32(a), make_float32(b), NFS));
+ return float32_val(float32_abs(float32_sub(f0, f1, fpst)));
}
/* Floating point comparisons produce an integer result.
* Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
* Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
*/
-uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
{
- return -float32_eq_quiet(make_float32(a), make_float32(b), NFS);
+ float_status *fpst = fpstp;
+ return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}
-uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
- return -float32_le(make_float32(b), make_float32(a), NFS);
+ float_status *fpst = fpstp;
+ return -float32_le(make_float32(b), make_float32(a), fpst);
}
-uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
- return -float32_lt(make_float32(b), make_float32(a), NFS);
+ float_status *fpst = fpstp;
+ return -float32_lt(make_float32(b), make_float32(a), fpst);
}
-uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
+ float_status *fpst = fpstp;
float32 f0 = float32_abs(make_float32(a));
float32 f1 = float32_abs(make_float32(b));
- return -float32_le(f1, f0, NFS);
+ return -float32_le(f1, f0, fpst);
}
-uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
+uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
+ float_status *fpst = fpstp;
float32 f0 = float32_abs(make_float32(a));
float32 f1 = float32_abs(make_float32(b));
- return -float32_lt(f1, f0, NFS);
+ return -float32_lt(f1, f0, fpst);
}
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
-void HELPER(neon_qunzip8)(uint32_t rd, uint32_t rm)
+void HELPER(neon_qunzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qunzip16)(uint32_t rd, uint32_t rm)
+void HELPER(neon_qunzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qunzip32)(uint32_t rd, uint32_t rm)
+void HELPER(neon_qunzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_unzip8)(uint32_t rd, uint32_t rm)
+void HELPER(neon_unzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
env->vfp.regs[rd] = make_float64(d0);
}
-void HELPER(neon_unzip16)(uint32_t rd, uint32_t rm)
+void HELPER(neon_unzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
env->vfp.regs[rd] = make_float64(d0);
}
-void HELPER(neon_qzip8)(uint32_t rd, uint32_t rm)
+void HELPER(neon_qzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qzip16)(uint32_t rd, uint32_t rm)
+void HELPER(neon_qzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qzip32)(uint32_t rd, uint32_t rm)
+void HELPER(neon_qzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_zip8)(uint32_t rd, uint32_t rm)
+void HELPER(neon_zip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
env->vfp.regs[rd] = make_float64(d0);
}
-void HELPER(neon_zip16)(uint32_t rd, uint32_t rm)
+void HELPER(neon_zip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
}
}
+static TCGv_ptr get_fpstatus_ptr(int neon)
+{
+ TCGv_ptr statusptr = tcg_temp_new_ptr();
+ int offset;
+ if (neon) {
+ offset = offsetof(CPUState, vfp.standard_fp_status);
+ } else {
+ offset = offsetof(CPUState, vfp.fp_status);
+ }
+ tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ return statusptr;
+}
+
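get_fpstatus_ptr() centralizes the choice between the two float_status blocks: neon != 0 selects vfp.standard_fp_status (Neon always runs with the "standard FPSCR" settings), while neon == 0 selects vfp.fp_status, which is governed by the real FPSCR. A typical caller, mirroring the uses later in this patch (tmp and tmp2 are TCGv temporaries holding the float32 operands):

TCGv_ptr fpstatus = get_fpstatus_ptr(1);          /* Neon: standard FP status */
gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);    /* float32 add with that status */
tcg_temp_free_ptr(fpstatus);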
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
- if (dp) \
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
- else \
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
+ TCGv_ptr fpst = get_fpstatus_ptr(0); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
+ } \
+ tcg_temp_free_ptr(fpst); \
}
VFP_OP2(add)
static inline void gen_vfp_F1_mul(int dp)
{
/* Like gen_vfp_mul() but put result in F1 */
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
if (dp) {
- gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, cpu_env);
+ gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
} else {
- gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, cpu_env);
+ gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
}
+ tcg_temp_free_ptr(fpst);
}
static inline void gen_vfp_F1_neg(int dp)
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
- TCGv_ptr statusptr = tcg_temp_new_ptr(); \
- int offset; \
- if (neon) { \
- offset = offsetof(CPUState, vfp.standard_fp_status); \
- } else { \
- offset = offsetof(CPUState, vfp.fp_status); \
- } \
- tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
if (dp) { \
gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
} else { \
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
- TCGv_ptr statusptr = tcg_temp_new_ptr(); \
- int offset; \
- if (neon) { \
- offset = offsetof(CPUState, vfp.standard_fp_status); \
- } else { \
- offset = offsetof(CPUState, vfp.fp_status); \
- } \
- tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
if (dp) { \
gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
} else { \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
TCGv tmp_shift = tcg_const_i32(shift); \
- TCGv_ptr statusptr = tcg_temp_new_ptr(); \
- int offset; \
- if (neon) { \
- offset = offsetof(CPUState, vfp.standard_fp_status); \
- } else { \
- offset = offsetof(CPUState, vfp.fp_status); \
- } \
- tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
if (dp) { \
gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
} else { \
gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
-#define IWMMXT_OP_SIZE(name) \
-IWMMXT_OP(name##b) \
-IWMMXT_OP(name##w) \
-IWMMXT_OP(name##l)
+#define IWMMXT_OP_ENV(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+}
-#define IWMMXT_OP_1(name) \
+#define IWMMXT_OP_ENV_SIZE(name) \
+IWMMXT_OP_ENV(name##b) \
+IWMMXT_OP_ENV(name##w) \
+IWMMXT_OP_ENV(name##l)
+
+#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
- gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)
-IWMMXT_OP_SIZE(unpackl)
-IWMMXT_OP_SIZE(unpackh)
-
-IWMMXT_OP_1(unpacklub)
-IWMMXT_OP_1(unpackluw)
-IWMMXT_OP_1(unpacklul)
-IWMMXT_OP_1(unpackhub)
-IWMMXT_OP_1(unpackhuw)
-IWMMXT_OP_1(unpackhul)
-IWMMXT_OP_1(unpacklsb)
-IWMMXT_OP_1(unpacklsw)
-IWMMXT_OP_1(unpacklsl)
-IWMMXT_OP_1(unpackhsb)
-IWMMXT_OP_1(unpackhsw)
-IWMMXT_OP_1(unpackhsl)
-
-IWMMXT_OP_SIZE(cmpeq)
-IWMMXT_OP_SIZE(cmpgtu)
-IWMMXT_OP_SIZE(cmpgts)
-
-IWMMXT_OP_SIZE(mins)
-IWMMXT_OP_SIZE(minu)
-IWMMXT_OP_SIZE(maxs)
-IWMMXT_OP_SIZE(maxu)
-
-IWMMXT_OP_SIZE(subn)
-IWMMXT_OP_SIZE(addn)
-IWMMXT_OP_SIZE(subu)
-IWMMXT_OP_SIZE(addu)
-IWMMXT_OP_SIZE(subs)
-IWMMXT_OP_SIZE(adds)
-
-IWMMXT_OP(avgb0)
-IWMMXT_OP(avgb1)
-IWMMXT_OP(avgw0)
-IWMMXT_OP(avgw1)
+IWMMXT_OP_ENV_SIZE(unpackl)
+IWMMXT_OP_ENV_SIZE(unpackh)
+
+IWMMXT_OP_ENV1(unpacklub)
+IWMMXT_OP_ENV1(unpackluw)
+IWMMXT_OP_ENV1(unpacklul)
+IWMMXT_OP_ENV1(unpackhub)
+IWMMXT_OP_ENV1(unpackhuw)
+IWMMXT_OP_ENV1(unpackhul)
+IWMMXT_OP_ENV1(unpacklsb)
+IWMMXT_OP_ENV1(unpacklsw)
+IWMMXT_OP_ENV1(unpacklsl)
+IWMMXT_OP_ENV1(unpackhsb)
+IWMMXT_OP_ENV1(unpackhsw)
+IWMMXT_OP_ENV1(unpackhsl)
+
+IWMMXT_OP_ENV_SIZE(cmpeq)
+IWMMXT_OP_ENV_SIZE(cmpgtu)
+IWMMXT_OP_ENV_SIZE(cmpgts)
+
+IWMMXT_OP_ENV_SIZE(mins)
+IWMMXT_OP_ENV_SIZE(minu)
+IWMMXT_OP_ENV_SIZE(maxs)
+IWMMXT_OP_ENV_SIZE(maxu)
+
+IWMMXT_OP_ENV_SIZE(subn)
+IWMMXT_OP_ENV_SIZE(addn)
+IWMMXT_OP_ENV_SIZE(subu)
+IWMMXT_OP_ENV_SIZE(addu)
+IWMMXT_OP_ENV_SIZE(subs)
+IWMMXT_OP_ENV_SIZE(adds)
+
+IWMMXT_OP_ENV(avgb0)
+IWMMXT_OP_ENV(avgb1)
+IWMMXT_OP_ENV(avgw0)
+IWMMXT_OP_ENV(avgw1)
IWMMXT_OP(msadb)
-IWMMXT_OP(packuw)
-IWMMXT_OP(packul)
-IWMMXT_OP(packuq)
-IWMMXT_OP(packsw)
-IWMMXT_OP(packsl)
-IWMMXT_OP(packsq)
+IWMMXT_OP_ENV(packuw)
+IWMMXT_OP_ENV(packul)
+IWMMXT_OP_ENV(packuq)
+IWMMXT_OP_ENV(packsw)
+IWMMXT_OP_ENV(packsl)
+IWMMXT_OP_ENV(packsq)
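Following the macro definition above, each IWMMXT_OP_ENV(name) produces a wrapper that loads the second operand register and calls the env-taking helper; for example, IWMMXT_OP_ENV(packuw) expands, in effect, to:

static inline void gen_op_iwmmxt_packuw_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_packuw(cpu_M0, cpu_env, cpu_M0, cpu_V1);
}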
static void gen_op_iwmmxt_set_mup(void)
{
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(tmp);
return 1;
}
- gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
tcg_temp_free_i32(tmp);
return 1;
}
- gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
tcg_temp_free_i32(tmp);
return 1;
}
- gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
- gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
tcg_temp_free(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
return 0;
}
-static int cp15_user_ok(uint32_t insn)
+static int cp15_user_ok(CPUState *env, uint32_t insn)
{
int cpn = (insn >> 16) & 0xf;
int cpm = insn & 0xf;
int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
+ if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
+ /* Performance monitor registers fall into three categories:
+ * (a) always UNDEF in usermode
+ * (b) UNDEF only if PMUSERENR.EN is 0
+ * (c) always read OK and UNDEF on write (PMUSERENR only)
+ */
+ if ((cpm == 12 && (op < 6)) ||
+ (cpm == 13 && (op < 3))) {
+ return env->cp15.c9_pmuserenr;
+ } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
+ /* PMUSERENR, read only */
+ return 1;
+ }
+ return 0;
+ }
+
if (cpn == 13 && cpm == 0) {
/* TLS register. */
if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
/* cdp */
return 1;
}
- if (IS_USER(s) && !cp15_user_ok(insn)) {
+ if (IS_USER(s) && !cp15_user_ok(env, insn)) {
return 1;
}
if (q) {
switch (size) {
case 0:
- gen_helper_neon_qunzip8(tmp, tmp2);
+ gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_qunzip16(tmp, tmp2);
+ gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
break;
case 2:
- gen_helper_neon_qunzip32(tmp, tmp2);
+ gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
break;
default:
abort();
} else {
switch (size) {
case 0:
- gen_helper_neon_unzip8(tmp, tmp2);
+ gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_unzip16(tmp, tmp2);
+ gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
break;
default:
abort();
if (q) {
switch (size) {
case 0:
- gen_helper_neon_qzip8(tmp, tmp2);
+ gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_qzip16(tmp, tmp2);
+ gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
break;
case 2:
- gen_helper_neon_qzip32(tmp, tmp2);
+ gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
break;
default:
abort();
} else {
switch (size) {
case 0:
- gen_helper_neon_zip8(tmp, tmp2);
+ gen_helper_neon_zip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_zip16(tmp, tmp2);
+ gen_helper_neon_zip16(cpu_env, tmp, tmp2);
break;
default:
abort();
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
- case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
- case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
- case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
+ case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
default: abort();
}
}
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
- case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
- case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
- case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
+ case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
default: abort();
}
}
static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
- case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
- case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
- case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
+ case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
default: abort();
}
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
switch (size) {
- case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
- case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
+ case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
+ case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
default: abort();
}
}
switch (op) {
case NEON_3R_VQADD:
if (u) {
- gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
} else {
- gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
}
break;
case NEON_3R_VQSUB:
if (u) {
- gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
} else {
- gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
}
break;
case NEON_3R_VSHL:
break;
case NEON_3R_VQSHL:
if (u) {
- gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
} else {
- gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
}
break;
case NEON_3R_VRSHL:
break;
case NEON_3R_VQRSHL:
if (u) {
- gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
} else {
- gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
}
break;
case NEON_3R_VADD_VSUB:
GEN_NEON_INTEGER_OP(hadd);
break;
case NEON_3R_VQADD:
- GEN_NEON_INTEGER_OP(qadd);
+ GEN_NEON_INTEGER_OP_ENV(qadd);
break;
case NEON_3R_VRHADD:
GEN_NEON_INTEGER_OP(rhadd);
GEN_NEON_INTEGER_OP(hsub);
break;
case NEON_3R_VQSUB:
- GEN_NEON_INTEGER_OP(qsub);
+ GEN_NEON_INTEGER_OP_ENV(qsub);
break;
case NEON_3R_VCGT:
GEN_NEON_INTEGER_OP(cgt);
GEN_NEON_INTEGER_OP(shl);
break;
case NEON_3R_VQSHL:
- GEN_NEON_INTEGER_OP(qshl);
+ GEN_NEON_INTEGER_OP_ENV(qshl);
break;
case NEON_3R_VRSHL:
GEN_NEON_INTEGER_OP(rshl);
break;
case NEON_3R_VQRSHL:
- GEN_NEON_INTEGER_OP(qrshl);
+ GEN_NEON_INTEGER_OP_ENV(qrshl);
break;
case NEON_3R_VMAX:
GEN_NEON_INTEGER_OP(max);
case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
if (!u) { /* VQDMULH */
switch (size) {
- case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
+ case 1:
+ gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ break;
default: abort();
}
} else { /* VQRDMULH */
switch (size) {
- case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
+ case 1:
+ gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ break;
default: abort();
}
}
}
break;
case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
switch ((u << 2) | size) {
case 0: /* VADD */
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ case 4: /* VPADD */
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
break;
case 2: /* VSUB */
- gen_helper_neon_sub_f32(tmp, tmp, tmp2);
- break;
- case 4: /* VPADD */
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
break;
case 6: /* VABD */
- gen_helper_neon_abd_f32(tmp, tmp, tmp2);
+ gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
break;
default:
abort();
}
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_MULTIPLY:
- gen_helper_neon_mul_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
if (!u) {
tcg_temp_free_i32(tmp2);
tmp2 = neon_load_reg(rd, pass);
if (size == 0) {
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
} else {
- gen_helper_neon_sub_f32(tmp, tmp2, tmp);
+ gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
}
}
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_CMP:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
if (!u) {
- gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+ gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
} else {
- if (size == 0)
- gen_helper_neon_cge_f32(tmp, tmp, tmp2);
- else
- gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+ if (size == 0) {
+ gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
+ }
}
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_ACMP:
- if (size == 0)
- gen_helper_neon_acge_f32(tmp, tmp, tmp2);
- else
- gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_MINMAX:
- if (size == 0)
- gen_helper_neon_max_f32(tmp, tmp, tmp2);
- else
- gen_helper_neon_min_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_VRECPS_VRSQRTS:
if (size == 0)
gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
break;
case 6: /* VQSHLU */
- gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
break;
case 7: /* VQSHL */
if (u) {
- gen_helper_neon_qshl_u64(cpu_V0,
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
cpu_V0, cpu_V1);
} else {
- gen_helper_neon_qshl_s64(cpu_V0,
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
cpu_V0, cpu_V1);
}
break;
case 6: /* VQSHLU */
switch (size) {
case 0:
- gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
+ gen_helper_neon_qshlu_s8(tmp, cpu_env,
+ tmp, tmp2);
break;
case 1:
- gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
+ gen_helper_neon_qshlu_s16(tmp, cpu_env,
+ tmp, tmp2);
break;
case 2:
- gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
+ gen_helper_neon_qshlu_s32(tmp, cpu_env,
+ tmp, tmp2);
break;
default:
abort();
}
break;
case 7: /* VQSHL */
- GEN_NEON_INTEGER_OP(qshl);
+ GEN_NEON_INTEGER_OP_ENV(qshl);
break;
}
tcg_temp_free_i32(tmp2);
tmp2 = neon_load_reg(rn, pass);
if (op == 12) {
if (size == 1) {
- gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
+ gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
} else {
- gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
+ gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
}
} else if (op == 13) {
if (size == 1) {
- gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
+ gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
} else {
- gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
+ gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
}
} else if (op & 1) {
- gen_helper_neon_mul_f32(tmp, tmp, tmp2);
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
} else {
switch (size) {
case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
gen_neon_add(size, tmp, tmp2);
break;
case 1:
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case 4:
gen_neon_rsb(size, tmp, tmp2);
break;
case 5:
- gen_helper_neon_sub_f32(tmp, tmp2, tmp);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
default:
abort();
}
break;
case NEON_2RM_VQABS:
switch (size) {
- case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
- case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
- case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
+ case 0:
+ gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
+ break;
+ case 2:
+ gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
+ break;
default: abort();
}
break;
case NEON_2RM_VQNEG:
switch (size) {
- case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
- case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
- case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
+ case 0:
+ gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
+ break;
+ case 2:
+ gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
+ break;
default: abort();
}
break;
tcg_temp_free(tmp2);
break;
case NEON_2RM_VCGT0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+ gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCGE0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cge_f32(tmp, tmp, tmp2);
+ gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCEQ0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+ gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCLE0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cge_f32(tmp, tmp2, tmp);
+ gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCLT0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
+ gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VABS_F:
gen_vfp_abs(0);
break;
}
#if !defined(CONFIG_USER_ONLY)
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int is_asi, int size);
+void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
+ int is_write, int is_exec, int is_asi, int size);
#endif
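The renamed cpu_unassigned_access() receives the CPUState explicitly instead of recovering it from the global cpu_single_env, so the memory subsystem can report the fault against whichever CPU actually made the access. A hypothetical call site would look like the following (a sketch only; the real caller lives in the generic memory code, which this excerpt does not show):

#if !defined(CONFIG_USER_ONLY)
static void report_unassigned_example(CPUState *env, target_phys_addr_t addr)
{
    /* not a write, not an instruction fetch, no ASI, 4-byte access */
    cpu_unassigned_access(env, addr, 0, 0, 0, 4);
}
#endif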
static inline bool cpu_has_work(CPUState *env)
mmu_write(env, rn, v);
}
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int is_asi, int size)
+void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
+ int is_write, int is_exec, int is_asi, int size)
{
CPUState *saved_env;
- if (!cpu_single_env) {
- /* XXX: ??? */
- return;
- }
-
- /* XXX: hack to restore env in all cases, even if not called from
- generated code */
saved_env = env;
- env = cpu_single_env;
+ env = env1;
+
qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
addr, is_write, is_exec);
if (!(env->sregs[SR_MSR] & MSR_EE)) {
void r4k_helper_tlbp (void);
void r4k_helper_tlbr (void);
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int unused, int size);
+void cpu_unassigned_access(CPUState *env, target_phys_addr_t addr,
+ int is_write, int is_exec, int unused, int size);
#endif
void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
env = saved_env;
}
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int unused, int size)
+void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
+ int is_write, int is_exec, int unused, int size)
{
+ env = env1;
+
if (is_exec)
helper_raise_exception(EXCP_IBE);
else
#endif
#define TTE_VALID_BIT (1ULL << 63)
+#define TTE_NFO_BIT (1ULL << 60)
#define TTE_USED_BIT (1ULL << 41)
#define TTE_LOCKED_BIT (1ULL << 6)
+#define TTE_SIDEEFFECT_BIT (1ULL << 3)
+#define TTE_PRIV_BIT (1ULL << 2)
+#define TTE_W_OK_BIT (1ULL << 1)
#define TTE_GLOBAL_BIT (1ULL << 0)
#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT)
+#define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT)
#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT)
#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT)
+#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
+#define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT)
+#define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT)
#define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT)
#define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT)
#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
+#define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL)
+#define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL)
+
+#define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */
+#define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */
+#define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */
+#define SFSR_FT_VA_DMMU_BIT (1ULL << 12) /* USIIi VA out of range (DMMU) */
+#define SFSR_FT_NFO_BIT (1ULL << 11) /* NFO page access */
+#define SFSR_FT_ILL_BIT (1ULL << 10) /* illegal LDA/STA ASI */
+#define SFSR_FT_ATOMIC_BIT (1ULL << 9) /* atomic op on noncacheable area */
+#define SFSR_FT_NF_E_BIT (1ULL << 8) /* NF access on side effect area */
+#define SFSR_FT_PRIV_BIT (1ULL << 7) /* privilege violation */
+#define SFSR_PR_BIT (1ULL << 3) /* privilege mode */
+#define SFSR_WRITE_BIT (1ULL << 2) /* write access mode */
+#define SFSR_OW_BIT (1ULL << 1) /* status overwritten */
+#define SFSR_VALID_BIT (1ULL << 0) /* status valid */
+
+#define SFSR_ASI_SHIFT 16 /* 23:16 ASI value */
+#define SFSR_ASI_MASK (0xffULL << SFSR_ASI_SHIFT)
+#define SFSR_CT_PRIMARY (0ULL << 4) /* 5:4 context type */
+#define SFSR_CT_SECONDARY (1ULL << 4)
+#define SFSR_CT_NUCLEUS (2ULL << 4)
+#define SFSR_CT_NOTRANS (3ULL << 4)
+#define SFSR_CT_MASK (3ULL << 4)
+
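The SFSR (synchronous fault status register) value is assembled from these bits in the fault paths below. As an illustration (a sketch, not code from the patch), a user-mode write that hits a privileged page through the primary context would end up with:

uint64_t sfsr_example = SFSR_CT_PRIMARY    /* 5:4 context type = primary */
                      | SFSR_WRITE_BIT     /* the faulting access was a write */
                      | SFSR_FT_PRIV_BIT   /* fault type: privilege violation */
                      | SFSR_VALID_BIT;    /* status word now holds a valid fault */
/* SFSR_PR_BIT stays clear because the access came from user mode, and
 * SFSR_OW_BIT is only set when a previous valid status had not been read. */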
typedef struct SparcTLBEntry {
uint64_t tag;
uint64_t tte;
/* cpu-exec.c */
#if !defined(CONFIG_USER_ONLY)
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int is_asi, int size);
+void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
+ int is_write, int is_exec, int is_asi, int size);
+#if defined(TARGET_SPARC64)
target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr,
int mmu_idx);
+#endif
#endif
int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
{
uint64_t mask;
- switch ((tlb->tte >> 61) & 3) {
+ switch (TTE_PGSIZE(tlb->tte)) {
default:
case 0x0: // 8k
mask = 0xffffffffffffe000ULL;
{
unsigned int i;
uint64_t context;
+ uint64_t sfsr = 0;
int is_user = (mmu_idx == MMU_USER_IDX ||
mmu_idx == MMU_USER_SECONDARY_IDX);
case MMU_USER_IDX:
case MMU_KERNEL_IDX:
context = env->dmmu.mmu_primary_context & 0x1fff;
+ sfsr |= SFSR_CT_PRIMARY;
break;
case MMU_USER_SECONDARY_IDX:
case MMU_KERNEL_SECONDARY_IDX:
context = env->dmmu.mmu_secondary_context & 0x1fff;
+ sfsr |= SFSR_CT_SECONDARY;
break;
case MMU_NUCLEUS_IDX:
+ sfsr |= SFSR_CT_NUCLEUS;
+ /* FALLTHRU */
default:
context = 0;
break;
}
+ if (rw == 1) {
+ sfsr |= SFSR_WRITE_BIT;
+ } else if (rw == 4) {
+ sfsr |= SFSR_NF_BIT;
+ }
+
for (i = 0; i < 64; i++) {
// ctx match, vaddr match, valid?
if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
-
- uint8_t fault_type = 0;
+ int do_fault = 0;
// access ok?
- if ((env->dtlb[i].tte & 0x4) && is_user) {
- fault_type |= 1; /* privilege violation */
- env->exception_index = TT_DFAULT;
+ /* multiple bits in SFSR.FT may be set on TT_DFAULT */
+ if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
DPRINTF_MMU("DFAULT at %" PRIx64 " context %" PRIx64
" mmu_idx=%d tl=%d\n",
address, context, mmu_idx, env->tl);
- } else if (!(env->dtlb[i].tte & 0x2) && (rw == 1)) {
+ }
+ if (rw == 4) {
+ if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_NF_E_BIT;
+ }
+ } else {
+ if (TTE_IS_NFO(env->dtlb[i].tte)) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_NFO_BIT;
+ }
+ }
+
+ if (do_fault) {
+ /* faults above are reported with TT_DFAULT. */
+ env->exception_index = TT_DFAULT;
+ } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
+ do_fault = 1;
env->exception_index = TT_DPROT;
DPRINTF_MMU("DPROT at %" PRIx64 " context %" PRIx64
" mmu_idx=%d tl=%d\n",
address, context, mmu_idx, env->tl);
- } else {
+ }
+
+ if (!do_fault) {
*prot = PAGE_READ;
- if (env->dtlb[i].tte & 0x2)
+ if (TTE_IS_W_OK(env->dtlb[i].tte)) {
*prot |= PAGE_WRITE;
+ }
TTE_SET_USED(env->dtlb[i].tte);
return 0;
}
- if (env->dmmu.sfsr & 1) /* Fault status register */
- env->dmmu.sfsr = 2; /* overflow (not read before
- another fault) */
+ if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
+ sfsr |= SFSR_OW_BIT; /* overflow (not read before
+ another fault) */
+ }
- env->dmmu.sfsr |= (is_user << 3) | ((rw == 1) << 2) | 1;
+ if (env->pstate & PS_PRIV) {
+ sfsr |= SFSR_PR_BIT;
+ }
- env->dmmu.sfsr |= (fault_type << 7);
+ /* FIXME: ASI field in SFSR must be set */
+ env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;
env->dmmu.sfar = address; /* Fault address register */
DPRINTF_MMU("DMISS at %" PRIx64 " context %" PRIx64 "\n",
address, context);
+ /*
+ * On MMU misses:
+ * - UltraSPARC IIi: SFSR and SFAR unmodified
+ * - JPS1: SFAR updated and some fields of SFSR updated
+ */
env->dmmu.tag_access = (address & ~0x1fffULL) | context;
env->exception_index = TT_DMISS;
return 1;
if (ultrasparc_tag_match(&env->itlb[i],
address, context, physical)) {
// access ok?
- if ((env->itlb[i].tte & 0x4) && is_user) {
- if (env->immu.sfsr) /* Fault status register */
- env->immu.sfsr = 2; /* overflow (not read before
- another fault) */
- env->immu.sfsr |= (is_user << 3) | 1;
+ if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
+ /* Fault status register */
+ if (env->immu.sfsr & SFSR_VALID_BIT) {
+ env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
+ another fault) */
+ } else {
+ env->immu.sfsr = 0;
+ }
+ if (env->pstate & PS_PRIV) {
+ env->immu.sfsr |= SFSR_PR_BIT;
+ }
+ if (env->tl > 0) {
+ env->immu.sfsr |= SFSR_CT_NUCLEUS;
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+ env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
env->exception_index = TT_TFAULT;
env->immu.tag_access = (address & ~0x1fffULL) | context;
} else {
(*cpu_fprintf)(f, "DMMU dump\n");
for (i = 0; i < 64; i++) {
- switch ((env->dtlb[i].tte >> 61) & 3) {
+ switch (TTE_PGSIZE(env->dtlb[i].tte)) {
default:
case 0x0:
mask = " 8k";
mask = " 4M";
break;
}
- if ((env->dtlb[i].tte & 0x8000000000000000ULL) != 0) {
+ if (TTE_IS_VALID(env->dtlb[i].tte)) {
(*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %" PRIx64
", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
i,
env->dtlb[i].tag & (uint64_t)~0x1fffULL,
- env->dtlb[i].tte & (uint64_t)0x1ffffffe000ULL,
+ TTE_PA(env->dtlb[i].tte),
mask,
- env->dtlb[i].tte & 0x4? "priv": "user",
- env->dtlb[i].tte & 0x2? "RW": "RO",
- env->dtlb[i].tte & 0x40? "locked": "unlocked",
+ TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
+ TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
+ TTE_IS_LOCKED(env->dtlb[i].tte) ?
+ "locked" : "unlocked",
env->dtlb[i].tag & (uint64_t)0x1fffULL,
TTE_IS_GLOBAL(env->dtlb[i].tte)?
"global" : "local");
} else {
(*cpu_fprintf)(f, "IMMU dump\n");
for (i = 0; i < 64; i++) {
- switch ((env->itlb[i].tte >> 61) & 3) {
+ switch (TTE_PGSIZE(env->itlb[i].tte)) {
default:
case 0x0:
mask = " 8k";
mask = " 4M";
break;
}
- if ((env->itlb[i].tte & 0x8000000000000000ULL) != 0) {
+ if (TTE_IS_VALID(env->itlb[i].tte)) {
(*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %" PRIx64
", %s, %s, %s, ctx %" PRId64 " %s\n",
i,
env->itlb[i].tag & (uint64_t)~0x1fffULL,
- env->itlb[i].tte & (uint64_t)0x1ffffffe000ULL,
+ TTE_PA(env->itlb[i].tte),
mask,
- env->itlb[i].tte & 0x4? "priv": "user",
- env->itlb[i].tte & 0x40? "locked": "unlocked",
+ TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
+ TTE_IS_LOCKED(env->itlb[i].tte) ?
+ "locked" : "unlocked",
env->itlb[i].tag & (uint64_t)0x1fffULL,
TTE_IS_GLOBAL(env->itlb[i].tte)?
"global" : "local");
#if !defined(CONFIG_USER_ONLY)
+static int cpu_sparc_get_phys_page(CPUState *env, target_phys_addr_t *phys,
+ target_ulong addr, int rw, int mmu_idx)
+{
+ target_ulong page_size;
+ int prot, access_index;
+
+ return get_physical_address(env, phys, &prot, &access_index, addr, rw,
+ mmu_idx, &page_size);
+}
+
+#if defined(TARGET_SPARC64)
target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr,
int mmu_idx)
{
target_phys_addr_t phys_addr;
- target_ulong page_size;
- int prot, access_index;
- if (get_physical_address(env, &phys_addr, &prot, &access_index, addr, 2,
- mmu_idx, &page_size) != 0)
- if (get_physical_address(env, &phys_addr, &prot, &access_index, addr,
- 0, mmu_idx, &page_size) != 0)
- return -1;
- if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED)
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
return -1;
+ }
return phys_addr;
}
+#endif
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
- return cpu_get_phys_page_nofault(env, addr, cpu_mmu_index(env));
+ target_phys_addr_t phys_addr;
+ int mmu_idx = cpu_mmu_index(env);
+
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
+ return -1;
+ }
+ }
+ if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED) {
+ return -1;
+ }
+ return phys_addr;
}
#endif
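Editorial note: the rw argument threaded through these page-table lookups is a small integer access code; the enum below merely gives names to the values this patch uses (hypothetical names, not part of QEMU). cpu_get_phys_page_debug() first tries an instruction-fetch lookup and falls back to a data read, while cpu_get_phys_page_nofault() uses the non-faulting code so missing translations are reported without raising a fault.

enum sparc_mmu_access_type {
    MMU_ACCESS_DATA_LOAD  = 0, /* normal data read                      */
    MMU_ACCESS_DATA_STORE = 1, /* data write, may raise TT_DPROT        */
    MMU_ACCESS_INSN_FETCH = 2, /* instruction fetch via the IMMU        */
    MMU_ACCESS_NOFAULT    = 4, /* non-faulting data read (no-fault ASIs) */
};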
VIS_HELPER(padd);
VIS_HELPER(psub);
#define VIS_CMPHELPER(name) \
- F_HELPER_0_0(name##16); \
- F_HELPER_0_0(name##32)
+ DEF_HELPER_0(f##name##16, i64); \
+ DEF_HELPER_0(f##name##32, i64)
VIS_CMPHELPER(cmpgt);
VIS_CMPHELPER(cmpeq);
VIS_CMPHELPER(cmple);
#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
-#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
+#if !defined(CONFIG_USER_ONLY)
+static void do_unassigned_access(target_phys_addr_t addr, int is_write,
+ int is_exec, int is_asi, int size);
+#else
+#ifdef TARGET_SPARC64
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
- int is_asi, int size);
+ int is_asi, int size);
+#endif
#endif
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
*/
switch (asi) {
case 0x04 ... 0x11:
- case 0x18 ... 0x19:
+ case 0x16 ... 0x19:
+ case 0x1E ... 0x1F:
case 0x24 ... 0x2C:
case 0x70 ... 0x73:
case 0x78 ... 0x79:
uint16_t w[4];
int16_t sw[4];
uint32_t l[2];
+ uint64_t ll;
float64 d;
} vis64;
VIS_HELPER(helper_fpsub, FSUB)
#define VIS_CMPHELPER(name, F) \
- void name##16(void) \
+ uint64_t name##16(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
- d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
- d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
- d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
- d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
+ d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0; \
+ d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0; \
\
- DT0 = d.d; \
+ return d.ll; \
} \
\
- void name##32(void) \
+ uint64_t name##32(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
- d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
- d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
+ d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \
+ d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \
+ d.VIS_L64(1) = 0; \
\
- DT0 = d.d; \
+ return d.ll; \
}
#define FCMPGT(a, b) ((a) > (b))
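The converted fcmp helpers now return their result as an i64, which the translator writes to the integer destination register (see the gen_movl_TN_reg() changes further down), matching the VIS definition of fcmp*. A plain-C editorial model of fcmpgt16 follows, with hypothetical names and an illustrative lane order:

#include <stdint.h>

static uint64_t model_fcmpgt16(uint64_t rs1, uint64_t rs2)
{
    uint64_t mask = 0;
    int i;

    /* Compare the four signed 16-bit lanes of rs1 against rs2 and set one
     * result bit per lane; the real helper's lane order depends on the
     * VIS_W64() host-endianness macro. */
    for (i = 0; i < 4; i++) {
        int16_t a = (int16_t)(rs1 >> (i * 16));
        int16_t b = (int16_t)(rs2 >> (i * 16));

        if (a > b) {
            mask |= 1ULL << i;
        }
    }
    return mask; /* upper 60 bits stay zero, like the cleared words above */
}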
helper_check_align(addr, size - 1);
addr = asi_address_mask(env, asi, addr);
- switch (asi) {
- case 0x82: // Primary no-fault
- case 0x8a: // Primary no-fault LE
- case 0x83: // Secondary no-fault
- case 0x8b: // Secondary no-fault LE
- {
- /* secondary space access has lowest asi bit equal to 1 */
- int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_IDX
- : MMU_KERNEL_SECONDARY_IDX;
+ /* process nonfaulting loads first */
+ if ((asi & 0xf6) == 0x82) {
+ int mmu_idx;
+
+ /* secondary space access has lowest asi bit equal to 1 */
+ if (env->pstate & PS_PRIV) {
+ mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX;
+ } else {
+ mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX;
+ }
- if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
+ if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) {
#ifdef DEBUG_ASI
- dump_asi("read ", last_addr, asi, size, ret);
+ dump_asi("read ", last_addr, asi, size, ret);
#endif
- return 0;
- }
+ /* env->exception_index is set in get_physical_address_data(). */
+ raise_exception(env->exception_index);
}
- // Fall through
+
+ /* convert nonfaulting load ASIs to normal load ASIs */
+ asi &= ~0x02;
+ }
+
+ switch (asi) {
case 0x10: // As if user primary
case 0x11: // As if user secondary
case 0x18: // As if user primary LE
case 0x1d: // Bypass, non-cacheable LE
case 0x88: // Primary LE
case 0x89: // Secondary LE
- case 0x8a: // Primary no-fault LE
- case 0x8b: // Secondary no-fault LE
switch(size) {
case 2:
ret = bswap16(ret);
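The new non-faulting-load path above relies on an encoding property of the V9 ASIs: the four no-fault ASIs differ from their normal counterparts only in bit 1. A small editorial check (not part of the patch) makes the bit trick explicit:

#include <assert.h>
#include <stdint.h>

static void check_nofault_asi_mapping(void)
{
    /* 0x82/0x83 are primary/secondary no-fault, 0x8a/0x8b their LE forms;
     * clearing bit 1 yields the normal ASIs 0x80/0x81/0x88/0x89. */
    static const uint8_t nofault[] = { 0x82, 0x83, 0x8a, 0x8b };
    static const uint8_t normal[]  = { 0x80, 0x81, 0x88, 0x89 };
    unsigned i;

    for (i = 0; i < 4; i++) {
        assert((nofault[i] & 0xf6) == 0x82);       /* matches the dispatch test */
        assert((nofault[i] & ~0x02) == normal[i]); /* the asi &= ~0x02 mapping  */
    }
}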
#ifndef TARGET_SPARC64
#if !defined(CONFIG_USER_ONLY)
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int is_asi, int size)
+static void do_unassigned_access(target_phys_addr_t addr, int is_write,
+ int is_exec, int is_asi, int size)
{
CPUState *saved_env;
int fault_type;
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
int is_asi, int size)
#else
-void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
- int is_asi, int size)
+static void do_unassigned_access(target_phys_addr_t addr, int is_write,
+ int is_exec, int is_asi, int size)
#endif
{
CPUState *saved_env;
#endif
}
#endif
+
+#if !defined(CONFIG_USER_ONLY)
+void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
+ int is_write, int is_exec, int is_asi, int size)
+{
+ env = env1;
+ do_unassigned_access(addr, is_write, is_exec, is_asi, size);
+}
+#endif
break;
case 0x102: /* V9 fmovdcc %icc */
FMOVDCC(0);
+ break;
case 0x103: /* V9 fmovqcc %icc */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVQCC(0);
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmple16();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmple16(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x022: /* VIS I fcmpne16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmpne16();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmpne16(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x024: /* VIS I fcmple32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmple32();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmple32(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x026: /* VIS I fcmpne32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmpne32();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmpne32(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x028: /* VIS I fcmpgt16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmpgt16();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmpgt16(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x02a: /* VIS I fcmpeq16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmpeq16();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmpeq16(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x02c: /* VIS I fcmpgt32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmpgt32();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmpgt32(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x02e: /* VIS I fcmpeq32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_op_load_fpr_DT0(DFPREG(rs1));
gen_op_load_fpr_DT1(DFPREG(rs2));
- gen_helper_fcmpeq32();
- gen_op_store_DT0_fpr(DFPREG(rd));
+ gen_helper_fcmpeq32(cpu_dst);
+ gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x031: /* VIS I fmul8x16 */
CHECK_FPU_FEATURE(dc, VIS1);
}
/* Call generated code */
- tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1]), 0);
+ tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
tb_ret_addr = s->code_ptr;
disable virtio_irq(void *vq) "vq %p"
disable virtio_notify(void *vdev, void *vq) "vdev %p vq %p"
+# hw/virtio-serial-bus.c
+disable virtio_serial_send_control_event(unsigned int port, uint16_t event, uint16_t value) "port %u, event %u, value %u"
+disable virtio_serial_throttle_port(unsigned int port, bool throttle) "port %u, throttle %d"
+disable virtio_serial_handle_control_message(uint16_t event, uint16_t value) "event %u, value %u"
+disable virtio_serial_handle_control_message_port(unsigned int port) "port %u"
+
+# hw/virtio-console.c
+disable virtio_console_flush_buf(unsigned int port, size_t len, ssize_t ret) "port %u, in_len %zu, out_len %zd"
+disable virtio_console_chr_read(unsigned int port, int size) "port %u, size %d"
+disable virtio_console_chr_event(unsigned int port, int event) "port %u, event %d"
+
# block.c
disable multiwrite_cb(void *mcb, int ret) "mcb %p ret %d"
disable bdrv_aio_multiwrite(void *mcb, int num_callbacks, int num_reqs) "mcb %p num_callbacks %d num_reqs %d"
disable cs4231_mem_writel_reg(uint32_t reg, uint32_t old, uint32_t val) "write reg %d: 0x%08x -> 0x%08x"
disable cs4231_mem_writel_dreg(uint32_t reg, uint32_t old, uint32_t val) "write dreg %d: 0x%02x -> 0x%02x"
+# hw/ds1225y.c
+disable nvram_read(uint32_t addr, uint32_t ret) "read addr %d: 0x%02x"
+disable nvram_write(uint32_t addr, uint32_t old, uint32_t val) "write addr %d: 0x%02x -> 0x%02x"
+
# hw/eccmemctl.c
disable ecc_mem_writel_mer(uint32_t val) "Write memory enable %08x"
disable ecc_mem_writel_mdr(uint32_t val) "Write memory delay %08x"
disable xen_client_set_memory(uint64_t start_addr, unsigned long size, unsigned long phys_offset, bool log_dirty) "%#"PRIx64" size %#lx, offset %#lx, log_dirty %i"
# xen-mapcache.c
-disable qemu_map_cache(uint64_t phys_addr) "want %#"PRIx64""
-disable qemu_remap_bucket(uint64_t index) "index %#"PRIx64""
-disable qemu_map_cache_return(void* ptr) "%p"
+disable xen_map_cache(uint64_t phys_addr) "want %#"PRIx64""
+disable xen_remap_bucket(uint64_t index) "index %#"PRIx64""
+disable xen_map_cache_return(void* ptr) "%p"
disable xen_map_block(uint64_t phys_addr, uint64_t size) "%#"PRIx64", size %#"PRIx64""
disable xen_unmap_block(void* addr, unsigned long size) "%p, size %#lx"
qemu_opt_foreach(opts, add_channel, NULL, 0);
- spice_server_init(spice_server, &core_interface);
+    if (spice_server_init(spice_server, &core_interface) != 0) {
+        fprintf(stderr, "failed to initialize spice server\n");
+        exit(1);
+    }
using_spice = 1;
migration_state.notify = migration_state_notifier;
QXLCommand *cmd;
uint8_t *src, *dst;
int by, bw, bh;
+ struct timespec time_space;
if (qemu_spice_rect_is_empty(&ssd->dirty)) {
return NULL;
drawable->surfaces_dest[0] = -1;
drawable->surfaces_dest[1] = -1;
drawable->surfaces_dest[2] = -1;
+ clock_gettime(CLOCK_MONOTONIC, &time_space);
+    /* monotonic clock time in milliseconds */
+ drawable->mm_time = time_space.tv_sec * 1000
+ + time_space.tv_nsec / 1000 / 1000;
drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
drawable->u.copy.src_bitmap = (intptr_t)image;
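A minimal editorial sketch of the timestamp conversion introduced above, assuming POSIX clock_gettime(); the helper name is hypothetical:

#include <stdint.h>
#include <time.h>

static uint32_t monotonic_time_ms(void)
{
    struct timespec ts;

    /* CLOCK_MONOTONIC seconds/nanoseconds folded into a 32-bit millisecond
     * counter, as done for drawable->mm_time. */
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint32_t)(ts.tv_sec * 1000 + ts.tv_nsec / 1000 / 1000);
}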
case IOREQ_TYPE_TIMEOFFSET:
break;
case IOREQ_TYPE_INVALIDATE:
- qemu_invalidate_map_cache();
+ xen_invalidate_map_cache();
break;
default:
hw_error("Invalid ioreq type 0x%x\n", req->type);
}
}
+static int store_dev_info(int domid, CharDriverState *cs, const char *string)
+{
+ struct xs_handle *xs = NULL;
+ char *path = NULL;
+ char *newpath = NULL;
+ char *pts = NULL;
+ int ret = -1;
+
+ /* Only continue if we're talking to a pty. */
+ if (strncmp(cs->filename, "pty:", 4)) {
+ return 0;
+ }
+ pts = cs->filename + 4;
+
+ /* We now have everything we need to set the xenstore entry. */
+ xs = xs_open(0);
+ if (xs == NULL) {
+ fprintf(stderr, "Could not contact XenStore\n");
+ goto out;
+ }
+
+ path = xs_get_domain_path(xs, domid);
+ if (path == NULL) {
+ fprintf(stderr, "xs_get_domain_path() error\n");
+ goto out;
+ }
+ newpath = realloc(path, (strlen(path) + strlen(string) +
+ strlen("/tty") + 1));
+ if (newpath == NULL) {
+ fprintf(stderr, "realloc error\n");
+ goto out;
+ }
+ path = newpath;
+
+ strcat(path, string);
+ strcat(path, "/tty");
+ if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
+ fprintf(stderr, "xs_write for '%s' fail", string);
+ goto out;
+ }
+ ret = 0;
+
+out:
+ free(path);
+    if (xs != NULL) {
+        xs_close(xs);
+    }
+
+ return ret;
+}
+
+void xenstore_store_pv_console_info(int i, CharDriverState *chr)
+{
+ if (i == 0) {
+ store_dev_info(xen_domid, chr, "/console");
+ } else {
+ char buf[32];
+ snprintf(buf, sizeof(buf), "/device/console/%d", i);
+ store_dev_info(xen_domid, chr, buf);
+ }
+}
+
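For illustration only (not part of the patch): the XenStore key that store_dev_info() ends up writing for console i, assuming the conventional /local/domain/<domid> domain path and a chardev whose filename starts with "pty:"; the function name is hypothetical:

#include <stdio.h>

static void print_console_tty_key(int domid, int i, const char *pts)
{
    char key[128];

    if (i == 0) {
        snprintf(key, sizeof(key), "/local/domain/%d/console/tty", domid);
    } else {
        snprintf(key, sizeof(key),
                 "/local/domain/%d/device/console/%d/tty", domid, i);
    }
    printf("%s = %s\n", key, pts); /* e.g. ... = /dev/pts/3 */
}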
static void xenstore_record_dm_state(XenIOState *s, const char *state)
{
char path[50];
}
/* Init RAM management */
- qemu_map_cache_init();
+ xen_map_cache_init();
xen_ram_init(ram_size);
qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);
cpu_register_phys_memory_client(&state->client);
state->log_for_dirtybit = NULL;
+ /* Initialize backend core & drivers */
+ if (xen_be_init() != 0) {
+ fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
+ exit(1);
+ }
+ xen_be_register("console", &xen_console_ops);
+ xen_be_register("vkbd", &xen_kbdmouse_ops);
+ xen_be_register("qdisk", &xen_blkdev_ops);
+
return 0;
}
+++ /dev/null
-/*
- * Copyright (C) 2011 Citrix Ltd.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#include "config.h"
-
-#include "cpu.h"
-#include "qemu-common.h"
-#include "cpu-common.h"
-#include "xen-mapcache.h"
-
-void qemu_map_cache_init(void)
-{
-}
-
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
-{
- return qemu_get_ram_ptr(phys_addr);
-}
-
-ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
-{
- return -1;
-}
-
-void qemu_invalidate_map_cache(void)
-{
-}
-
-void qemu_invalidate_entry(uint8_t *buffer)
-{
-}
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
+#define mapcache_lock() ((void)0)
+#define mapcache_unlock() ((void)0)
+
typedef struct MapCacheEntry {
target_phys_addr_t paddr_index;
uint8_t *vaddr_base;
return 0;
}
-void qemu_map_cache_init(void)
+void xen_map_cache_init(void)
{
unsigned long size;
struct rlimit rlimit_as;
size = mapcache->nr_buckets * sizeof (MapCacheEntry);
size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
- DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
+ DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
+ mapcache->nr_buckets, size);
mapcache->entry = qemu_mallocz(size);
}
-static void qemu_remap_bucket(MapCacheEntry *entry,
- target_phys_addr_t size,
- target_phys_addr_t address_index)
+static void xen_remap_bucket(MapCacheEntry *entry,
+ target_phys_addr_t size,
+ target_phys_addr_t address_index)
{
uint8_t *vaddr_base;
xen_pfn_t *pfns;
unsigned int i;
target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;
- trace_qemu_remap_bucket(address_index);
+ trace_xen_remap_bucket(address_index);
pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
err = qemu_mallocz(nb_pfn * sizeof (int));
qemu_free(err);
}
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
+uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
+ uint8_t lock)
{
MapCacheEntry *entry, *pentry = NULL;
target_phys_addr_t address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
target_phys_addr_t __size = size;
- trace_qemu_map_cache(phys_addr);
+ trace_xen_map_cache(phys_addr);
if (address_index == mapcache->last_address_index && !lock && !__size) {
- trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
+ trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
return mapcache->last_address_vaddr + address_offset;
}
if (!entry) {
entry = qemu_mallocz(sizeof (MapCacheEntry));
pentry->next = entry;
- qemu_remap_bucket(entry, __size, address_index);
+ xen_remap_bucket(entry, __size, address_index);
} else if (!entry->lock) {
if (!entry->vaddr_base || entry->paddr_index != address_index ||
entry->size != __size ||
!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
- qemu_remap_bucket(entry, __size, address_index);
+ xen_remap_bucket(entry, __size, address_index);
}
}
if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
mapcache->last_address_index = -1;
- trace_qemu_map_cache_return(NULL);
+ trace_xen_map_cache_return(NULL);
return NULL;
}
QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
}
- trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
+ trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
return mapcache->last_address_vaddr + address_offset;
}
-ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
+ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
MapCacheEntry *entry = NULL, *pentry = NULL;
MapCacheRev *reventry;
}
}
if (!found) {
- fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
+ fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
reventry->vaddr_req);
((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}
-void qemu_invalidate_entry(uint8_t *buffer)
+void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
MapCacheEntry *entry = NULL, *pentry = NULL;
MapCacheRev *reventry;
}
}
if (!found) {
- DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
+ DPRINTF("%s, could not find %p\n", __func__, buffer);
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
}
qemu_free(entry);
}
-void qemu_invalidate_map_cache(void)
+void xen_invalidate_map_cache(void)
{
unsigned long i;
MapCacheRev *reventry;
#ifndef XEN_MAPCACHE_H
#define XEN_MAPCACHE_H
-void qemu_map_cache_init(void);
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock);
-ram_addr_t qemu_ram_addr_from_mapcache(void *ptr);
-void qemu_invalidate_entry(uint8_t *buffer);
-void qemu_invalidate_map_cache(void);
-
-#define mapcache_lock() ((void)0)
-#define mapcache_unlock() ((void)0)
+void xen_map_cache_init(void);
+uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
+ uint8_t lock);
+ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
+void xen_invalidate_map_cache_entry(uint8_t *buffer);
+void xen_invalidate_map_cache(void);
#endif /* !XEN_MAPCACHE_H */
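A hypothetical usage sketch of the renamed map-cache interface declared above (editorial, not part of the patch); here a size of 0 and lock=1 are assumed to map the bucket containing the address and pin the entry until it is explicitly invalidated:

static void mapcache_usage_example(target_phys_addr_t gpa)
{
    uint8_t *p = xen_map_cache(gpa, 0, 1 /* lock */);

    if (p == NULL) {
        return; /* the guest frame could not be mapped */
    }
    p[0] ^= 0xff;                      /* touch the mapped guest page */
    xen_invalidate_map_cache_entry(p); /* drop the locked entry again */
}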
#include "qemu-common.h"
#include "hw/xen.h"
+void xenstore_store_pv_console_info(int i, CharDriverState *chr)
+{
+}
+
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
return -1;