From: SeokYeon Hwang Date: Wed, 10 Dec 2014 05:48:31 +0000 (+0900) Subject: Merge 'v2.2.0' into tizen_next_qemu_2.2 X-Git-Tag: Tizen_Studio_1.3_Release_p2.3.2~608^2~7 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ec77d208f0cc6de712c9b0b1345726a7258e57a1;p=sdk%2Femulator%2Fqemu.git Merge 'v2.2.0' into tizen_next_qemu_2.2 v2.2.0 release Conflicts: blockdev.c configure cpu-exec.c exec.c hw/Makefile.objs hw/i386/pc_piix.c qom/object.c target-i386/translate.c util/oslib-posix.c util/oslib-win32.c vl.c xen-hvm.c Signed-off-by: SeokYeon Hwang Change-Id: I0f708eddfb51b98b9a7453e6e92f20c90a8b5aaf --- ec77d208f0cc6de712c9b0b1345726a7258e57a1 diff --cc Makefile.target index 742d2d0223,e9ff1eed7b..0f0431eb66 --- a/Makefile.target +++ b/Makefile.target @@@ -120,18 -137,11 +137,20 @@@ obj-y += dump. LIBS+=$(libs_softmmu) # xen support - obj-$(CONFIG_XEN) += xen-all.o xen-mapcache.o - obj-$(call lnot,$(CONFIG_XEN)) += xen-stub.o + obj-$(CONFIG_XEN) += xen-common.o + obj-$(CONFIG_XEN_I386) += xen-hvm.o xen-mapcache.o + obj-$(call lnot,$(CONFIG_XEN)) += xen-common-stub.o + obj-$(call lnot,$(CONFIG_XEN_I386)) += xen-hvm-stub.o +# HAX support +ifdef CONFIG_WIN32 +obj-$(CONFIG_HAX) += target-i386/hax-all.o target-i386/hax-windows.o +endif +ifdef CONFIG_DARWIN +obj-$(CONFIG_HAX) += target-i386/hax-all.o target-i386/hax-darwin.o +endif +obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o + # Hardware support ifeq ($(TARGET_NAME), sparc64) obj-y += hw/sparc64/ @@@ -153,15 -157,14 +172,15 @@@ endif # CONFIG_SOFTMM %/translate.o: QEMU_CFLAGS += $(TRANSLATE_OPT_CFLAGS) dummy := $(call unnest-vars,,obj-y) + all-obj-y := $(obj-y) - # we are making another call to unnest-vars with different vars, protect obj-y, - # it can be overriden in subdir Makefile.objs - obj-y-save := $(obj-y) - + target-obj-y := block-obj-y := common-obj-y := + include $(SRC_PATH)/Makefile.objs + dummy := $(call unnest-vars,,target-obj-y) + target-obj-y-save := $(target-obj-y) dummy := $(call unnest-vars,.., \ 
block-obj-y \ block-obj-m \ diff --cc accel.c index 0000000000,74e41daaa5..bef28e6a93 mode 000000,100644..100644 --- a/accel.c +++ b/accel.c @@@ -1,0 -1,157 +1,165 @@@ + /* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + + #include "sysemu/accel.h" + #include "hw/boards.h" + #include "qemu-common.h" + #include "sysemu/arch_init.h" + #include "sysemu/sysemu.h" + #include "sysemu/kvm.h" + #include "sysemu/qtest.h" + #include "hw/xen/xen.h" + #include "qom/object.h" + #include "hw/boards.h" ++#include "sysemu/hax.h" ++ ++#ifdef CONFIG_MARU ++#include "tizen/src/util/maru_err_table.h" ++#endif + + int tcg_tb_size; + static bool tcg_allowed = true; + + static int tcg_init(MachineState *ms) + { + tcg_exec_init(tcg_tb_size * 1024 * 1024); + return 0; + } + + static const TypeInfo accel_type = { + .name = TYPE_ACCEL, + .parent = TYPE_OBJECT, + .class_size = sizeof(AccelClass), + .instance_size = sizeof(AccelState), + }; + + /* Lookup AccelClass from opt_name. Returns NULL if not found */ + static AccelClass *accel_find(const char *opt_name) + { + char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name); + AccelClass *ac = ACCEL_CLASS(object_class_by_name(class_name)); + g_free(class_name); + return ac; + } + + static int accel_init_machine(AccelClass *acc, MachineState *ms) + { + ObjectClass *oc = OBJECT_CLASS(acc); + const char *cname = object_class_get_name(oc); + AccelState *accel = ACCEL(object_new(cname)); + int ret; + ms->accelerator = accel; + *(acc->allowed) = true; + ret = acc->init_machine(ms); + if (ret < 0) { + ms->accelerator = NULL; + *(acc->allowed) = false; + object_unref(OBJECT(accel)); + } + return ret; + } + + int configure_accelerator(MachineState *ms) + { + const char *p; + char buf[10]; + int ret; + bool accel_initialised = false; + bool init_failed = false; + AccelClass *acc = NULL; + + p = qemu_opt_get(qemu_get_machine_opts(), "accel"); + if (p == NULL) { + /* Use the default "accelerator", tcg */ + p = "tcg"; + } + + while (!accel_initialised && *p != '\0') { + if (*p == ':') { + p++; + } + p = get_opt_name(buf, sizeof(buf), p, ':'); + acc = accel_find(buf); + if (!acc) { + fprintf(stderr, "\"%s\" accelerator not found.\n", buf); + continue; + } 
+ if (acc->available && !acc->available()) { + printf("%s not supported for this target\n", + acc->name); + continue; + } + ret = accel_init_machine(acc, ms); + if (ret < 0) { + init_failed = true; + fprintf(stderr, "failed to initialize %s: %s\n", + acc->name, + strerror(-ret)); + } else { + accel_initialised = true; + } + } + + if (!accel_initialised) { + if (!init_failed) { + fprintf(stderr, "No accelerator found!\n"); + } ++#ifdef CONFIG_MARU ++ maru_register_exit_msg(MARU_EXIT_UNKNOWN, "No accelerator found."); ++#endif + exit(1); + } + + if (init_failed) { + fprintf(stderr, "Back to %s accelerator.\n", acc->name); + } + + return !accel_initialised; + } + + + static void tcg_accel_class_init(ObjectClass *oc, void *data) + { + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "tcg"; + ac->init_machine = tcg_init; + ac->allowed = &tcg_allowed; + } + + #define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg") + + static const TypeInfo tcg_accel_type = { + .name = TYPE_TCG_ACCEL, + .parent = TYPE_ACCEL, + .class_init = tcg_accel_class_init, + }; + + static void register_accel_types(void) + { + type_register_static(&accel_type); + type_register_static(&tcg_accel_type); + } + + type_init(register_accel_types); diff --cc block/raw-win32.c index 7a0b766243,7b588815b9..711875396b --- a/block/raw-win32.c +++ b/block/raw-win32.c @@@ -307,18 -315,8 +324,9 @@@ static int raw_open(BlockDriverState *b filename = qemu_opt_get(opts, "filename"); +#ifndef CONFIG_MARU raw_parse_flags(flags, &access_flags, &overlapped); - if ((flags & BDRV_O_NATIVE_AIO) && aio == NULL) { - aio = win32_aio_init(); - if (aio == NULL) { - error_setg(errp, "Could not initialize AIO"); - ret = -EINVAL; - goto fail; - } - } - if (filename[0] && filename[1] == ':') { snprintf(s->drive_path, sizeof(s->drive_path), "%c:\\", filename[0]); } else if (filename[0] == '\\' && filename[1] == '\\') { @@@ -343,44 -341,19 +351,53 @@@ } goto fail; } +#else + open_flags = O_BINARY & ~O_ACCMODE; + if (flags & BDRV_O_RDWR) { + 
open_flags |= O_RDWR; + } else { + open_flags |= O_RDONLY; + } + + /* Use O_DSYNC for write-through caching, no flags for write-back caching, + * and O_DIRECT for no caching. */ + /* + if ((flags & BDRV_O_NOCACHE)) { + open_flags |= O_DIRECT; + } + if (!(flags & BDRV_O_CACHE_WB)) { + open_flags |= O_DSYNC; + } + */ + + if ((flags & BDRV_O_NATIVE_AIO) && aio == NULL) { + aio = win32_aio_init(); + if (aio == NULL) { + ret = -EINVAL; + goto fail; + } + } + + ret = qemu_open(filename, open_flags, 0644); + if (ret < 0) { + error_report("raw_open failed(%d) \n", ret); + return -errno; + } + s->hfile = (HANDLE)_get_osfhandle(ret); +#endif if (flags & BDRV_O_NATIVE_AIO) { - ret = win32_aio_attach(aio, s->hfile); + s->aio = win32_aio_init(); + if (s->aio == NULL) { + CloseHandle(s->hfile); + error_setg(errp, "Could not initialize AIO"); + ret = -EINVAL; + goto fail; + } + + ret = win32_aio_attach(s->aio, s->hfile); if (ret < 0) { + win32_aio_cleanup(s->aio); CloseHandle(s->hfile); error_setg_errno(errp, -ret, "Could not enable AIO"); goto fail; diff --cc blockdev.c index f8c63c902a,57910b82c7..5870b2571b --- a/blockdev.c +++ b/blockdev.c @@@ -46,12 -47,6 +47,10 @@@ #include "trace.h" #include "sysemu/arch_init.h" +#ifdef CONFIG_MARU +#include "tizen/src/util/maru_err_table.h" +#endif + - static QTAILQ_HEAD(drivelist, DriveInfo) drives = QTAILQ_HEAD_INITIALIZER(drives); - static const char *const if_name[IF_COUNT] = { [IF_NONE] = "none", [IF_IDE] = "ide", @@@ -508,21 -548,12 +552,22 @@@ static BlockBackend *blockdev_init(cons bdrv_flags |= ro ? 0 : BDRV_O_RDWR; QINCREF(bs_opts); - ret = bdrv_open(&dinfo->bdrv, file, NULL, bs_opts, bdrv_flags, drv, &error); + ret = bdrv_open(&bs, file, NULL, bs_opts, bdrv_flags, drv, &error); + assert(bs == blk_bs(blk)); if (ret < 0) { +#ifdef CONFIG_MARU + char *path = get_canonical_path(file); + char *msg = g_strdup_printf("Failed to load disk file from the following path. 
Check if the file is corrupted or missing.\n\n%s", path); + + start_simple_client(msg); + + g_free(path); + g_free(msg); +#endif + error_setg(errp, "could not open disk image %s: %s", - file ?: dinfo->id, error_get_pretty(error)); + file ?: blk_name(blk), error_get_pretty(error)); error_free(error); goto err; } diff --cc configure index a88fa73904,47048f0086..177a759b13 --- a/configure +++ b/configure @@@ -300,15 -309,10 +310,15 @@@ rbd=" smartcard_nss="" libusb="" usb_redir="" +opengl="" +efence="no" +yagl="no" +yagl_stats="no" glx="" +vigs="no" zlib="yes" - lzo="no" - snappy="no" + lzo="" + snappy="" guest_agent="" guest_agent_with_vss="no" vss_win32_sdk="" @@@ -968,11 -976,7 +999,9 @@@ for opt d ;; --disable-pie) pie="no" ;; - --enable-uname-release=*) uname_release="$optarg" - ;; - --enable-werror) werror="yes" + --enable-werror) + werror="yes" ; + force_werror="yes" ; ;; --disable-werror) werror="no" ;; @@@ -1411,28 -1398,12 +1459,30 @@@ Advanced options (experts only) --enable-vhdx enable support for the Microsoft VHDX image format --disable-quorum disable quorum block filter support --enable-quorum enable quorum block filter support + --disable-numa disable libnuma support + --enable-numa enable libnuma support + --disable-hax disable HAX acceleration support + --enable-hax enable HAX acceleration support + --enable-yagl enable YaGL device + --disable-yagl disable YaGL device + --enable-yagl-stats enable YaGL stats + --disable-yagl-stats disable YaGL stats + --enable-vigs enable VIGS device + --disable-vigs disable VIGS device + +TIZEN-maru options: + --enable-maru enable maru board + --enable-shm enable shared memory for framebuffer + --enable-libav enable libav library + --disable-libav disable libav library + --enable-libpng enable png library + --enable-dxva2 enable dxva2 support + --enable-vaapi enable vaapi support + NOTE: The object files are built at the place where configure is launched EOF - exit 1 + exit 0 fi # Now we have handled 
--enable-tcg-interpreter and know we're not just @@@ -4372,13 -4320,8 +4533,9 @@@ echo "xfsctl support $xfs echo "nss used $smartcard_nss" echo "libusb $libusb" echo "usb net redir $usb_redir" +echo "EFence support $efence" echo "GLX support $glx" - if test "$libiscsi_version" = "1.4.0"; then - echo "libiscsi support $libiscsi (1.4.0)" - else echo "libiscsi support $libiscsi" - fi echo "libnfs support $libnfs" echo "build guest agent $guest_agent" echo "QGA VSS support $guest_agent_with_vss" @@@ -4397,22 -4340,8 +4554,23 @@@ echo "vhdx $vhdx echo "Quorum $quorum" echo "lzo support $lzo" echo "snappy support $snappy" + echo "NUMA host support $numa" +echo "HAX support $hax" +echo "YaGL support $yagl" +echo "YaGL stats $yagl_stats" +echo "OpenGL support $opengl" +echo "VIGS support $vigs" + +# for TIZEN-maru +echo "TIZEN-maru support $maru" +echo "TIZEN-maru shared framebuffer support $shm" +echo "TIZEN-maru libav support $libav" +echo "TIZEN-maru libpng support $libpng" +echo "TIZEN-maru DXVA2 support $dxva2" +echo "TIZEN-maru vaapi support $vaapi" +# + if test "$sdl_too_old" = "yes"; then echo "-> Your SDL version is too old - please upgrade to have SDL support" fi @@@ -4719,40 -4668,10 +4897,40 @@@ if test "$spice" = "yes" ; the echo "CONFIG_SPICE=y" >> $config_host_mak fi +if test "$efence" = "yes" ; then + echo "CONFIG_EFENCE=y" >> $config_host_mak +fi + +if test "$yagl" = "yes" ; then + echo "CONFIG_YAGL=y" >> $config_host_mak + if test "$linux" = "yes" ; then + LIBS="-lGLU -ldl $LIBS" + elif test "$mingw32" = "yes" ; then + LIBS="-lopengl32 -lglu32 $LIBS" + elif test "$darwin" = "yes" ; then + LIBS="-framework OpenGL -framework AGL -framework GLUT $LIBS" + else + echo "ERROR: YaGL is not available on $targetos" + exit 1 + fi +fi + +if test "$yagl_stats" = "yes" ; then + echo "CONFIG_YAGL_STATS=y" >> $config_host_mak +fi + +if test "$vigs" = "yes" ; then + echo "CONFIG_VIGS=y" >> $config_host_mak +fi + +if test "$smartcard" = "yes" ; then + echo 
"CONFIG_SMARTCARD=y" >> $config_host_mak +fi + if test "$smartcard_nss" = "yes" ; then echo "CONFIG_SMARTCARD_NSS=y" >> $config_host_mak - echo "libcacard_libs=$libcacard_libs" >> $config_host_mak - echo "libcacard_cflags=$libcacard_cflags" >> $config_host_mak + echo "NSS_LIBS=$nss_libs" >> $config_host_mak + echo "NSS_CFLAGS=$nss_cflags" >> $config_host_mak fi if test "$libusb" = "yes" ; then diff --cc cpu-exec.c index 64a6150fc0,3913de020b..ff40f014a2 --- a/cpu-exec.c +++ b/cpu-exec.c @@@ -22,8 -23,110 +23,111 @@@ #include "tcg.h" #include "qemu/atomic.h" #include "sysemu/qtest.h" + #include "qemu/timer.h" +#include "sysemu/hax.h" + /* -icount align implementation. */ + + typedef struct SyncClocks { + int64_t diff_clk; + int64_t last_cpu_icount; + int64_t realtime_clock; + } SyncClocks; + + #if !defined(CONFIG_USER_ONLY) + /* Allow the guest to have a max 3ms advance. + * The difference between the 2 clocks could therefore + * oscillate around 0. + */ + #define VM_CLOCK_ADVANCE 3000000 + #define THRESHOLD_REDUCE 1.5 + #define MAX_DELAY_PRINT_RATE 2000000000LL + #define MAX_NB_PRINTS 100 + + static void align_clocks(SyncClocks *sc, const CPUState *cpu) + { + int64_t cpu_icount; + + if (!icount_align_option) { + return; + } + + cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low; + sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount); + sc->last_cpu_icount = cpu_icount; + + if (sc->diff_clk > VM_CLOCK_ADVANCE) { + #ifndef _WIN32 + struct timespec sleep_delay, rem_delay; + sleep_delay.tv_sec = sc->diff_clk / 1000000000LL; + sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL; + if (nanosleep(&sleep_delay, &rem_delay) < 0) { + sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL; + sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec; + } else { + sc->diff_clk = 0; + } + #else + Sleep(sc->diff_clk / SCALE_MS); + sc->diff_clk = 0; + #endif + } + } + + static void print_delay(const SyncClocks *sc) + { + static float 
threshold_delay; + static int64_t last_realtime_clock; + static int nb_prints; + + if (icount_align_option && + sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE && + nb_prints < MAX_NB_PRINTS) { + if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) || + (-sc->diff_clk / (float)1000000000LL < + (threshold_delay - THRESHOLD_REDUCE))) { + threshold_delay = (-sc->diff_clk / 1000000000LL) + 1; + printf("Warning: The guest is now late by %.1f to %.1f seconds\n", + threshold_delay - 1, + threshold_delay); + nb_prints++; + last_realtime_clock = sc->realtime_clock; + } + } + } + + static void init_delay_params(SyncClocks *sc, + const CPUState *cpu) + { + if (!icount_align_option) { + return; + } + sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - + sc->realtime_clock + + cpu_get_clock_offset(); + sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low; + if (sc->diff_clk < max_delay) { + max_delay = sc->diff_clk; + } + if (sc->diff_clk > max_advance) { + max_advance = sc->diff_clk; + } + + /* Print every 2s max if the guest is late. 
We limit the number + of printed messages to NB_PRINT_MAX(currently 100) */ + print_delay(sc); + } + #else + static void align_clocks(SyncClocks *sc, const CPUState *cpu) + { + } + + static void init_delay_params(SyncClocks *sc, const CPUState *cpu) + { + } + #endif /* CONFIG USER ONLY */ + void cpu_loop_exit(CPUState *cpu) { cpu->current_tb = NULL; @@@ -709,10 -525,9 +549,13 @@@ int cpu_exec(CPUArchState *env } } cpu->current_tb = NULL; +#ifdef CONFIG_HAX + if (hax_enabled() && hax_stop_emulation(cpu)) + cpu_loop_exit(cpu); +#endif + /* Try to align the host and virtual clocks + if the guest is in advance */ + align_clocks(&sc, cpu); /* reset soft MMU for next block (it can currently only be set by a memory fault) */ } /* for(;;) */ diff --cc exec.c index 2c26903e4a,71ac104b39..c54be02900 --- a/exec.c +++ b/exec.c @@@ -1256,63 -1303,26 +1304,41 @@@ static ram_addr_t ram_block_add(RAMBloc old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; - size = TARGET_PAGE_ALIGN(size); - new_block = g_malloc0(sizeof(*new_block)); - new_block->fd = -1; - /* This assumes the iothread lock is taken here too. */ qemu_mutex_lock_ramlist(); - new_block->mr = mr; - new_block->offset = find_ram_offset(size); - if (host) { - new_block->host = host; - new_block->flags |= RAM_PREALLOC_MASK; - } else if (xen_enabled()) { - if (mem_path) { - fprintf(stderr, "-mem-path not supported with Xen\n"); - exit(1); - } - xen_ram_alloc(new_block->offset, size, mr); - } else { - if (mem_path) { - if (phys_mem_alloc != qemu_anon_ram_alloc) { - /* - * file_ram_alloc() needs to allocate just like - * phys_mem_alloc, but we haven't bothered to provide - * a hook there. 
- */ - fprintf(stderr, - "-mem-path not supported with this accelerator\n"); - exit(1); - } - new_block->host = file_ram_alloc(new_block, size, mem_path); - } - if (!new_block->host) { - new_block->host = phys_mem_alloc(size); + new_block->offset = find_ram_offset(new_block->length); + + if (!new_block->host) { + if (xen_enabled()) { + xen_ram_alloc(new_block->offset, new_block->length, new_block->mr); + } else { + new_block->host = phys_mem_alloc(new_block->length, + &new_block->mr->align); +#ifdef CONFIG_HAX + /* + * In Hax, the qemu allocate the virtual address, and HAX kernel + * populate the memory with physical memory. Currently we have no + * paging, so user should make sure enough free memory in advance + */ + if (hax_enabled()) { + int ret; + ret = hax_populate_ram((uint64_t)(uintptr_t)new_block->host, size); + if (ret < 0) { + fprintf(stderr, "Hax failed to populate ram\n"); + exit(-1); + } + } +#endif if (!new_block->host) { - fprintf(stderr, "Cannot set up guest memory '%s': %s\n", - new_block->mr->name, strerror(errno)); - exit(1); + error_setg_errno(errp, errno, + "cannot set up guest memory '%s'", + memory_region_name(new_block->mr)); + qemu_mutex_unlock_ramlist(); + return -1; } - memory_try_enable_merging(new_block->host, size); + memory_try_enable_merging(new_block->host, new_block->length); } } - new_block->length = size; /* Keep the list sorted from biggest to smallest block. 
*/ QTAILQ_FOREACH(block, &ram_list.blocks, next) { diff --cc hw/9pfs/virtio-9p-device.c index 05dd75999d,2572747629..6edc495d60 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@@ -19,11 -19,8 +19,12 @@@ #include "fsdev/qemu-fsdev.h" #include "virtio-9p-xattr.h" #include "virtio-9p-coth.h" + #include "hw/virtio/virtio-access.h" +#ifdef CONFIG_MARU +#include "tizen/src/util/maru_err_table.h" +#endif + static uint32_t virtio_9p_get_features(VirtIODevice *vdev, uint32_t features) { features |= 1 << VIRTIO_9P_MOUNT_TAG; diff --cc hw/9pfs/virtio-9p-maru.c index 0811794527,0000000000..6cd62bb8b2 mode 100644,000000..100644 --- a/hw/9pfs/virtio-9p-maru.c +++ b/hw/9pfs/virtio-9p-maru.c @@@ -1,3558 -1,0 +1,3559 @@@ +/* + * Virtio 9p backend for Maru + * Based on hw/9pfs/virtio-9p.c: + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd All Rights Reserved + * + * Contact: + * Sooyoung Ha + * YeongKyoon Lee + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Contributors: + * - S-Core Co., Ltd + * + */ + +#include "hw/virtio/virtio.h" +#include "hw/i386/pc.h" +#include "qemu/sockets.h" +#include "virtio-9p.h" +#include "fsdev/qemu-fsdev.h" +#include "virtio-9p-xattr.h" +#include "virtio-9p-coth.h" +#include "trace.h" +#include "migration/migration.h" + +#ifdef CONFIG_WIN32 +#include "tizen/src/resources_win32.h" + +/* On windows, there's no mknod function. The device number is meaningless */ +#define makedev(x,y) 0 +#define major(x) 0 +#define minor(x) 0 + +/* Old MinGW's struct dirent doesn't support d_type member */ +#define WIN32_D_TYPE 0 + +extern uint64_t hostBytesPerSector; +#endif + +#ifdef CONFIG_DARWIN +#define O_DIRECT 040000 /* Direct disk access */ +#define O_NOATIME 01000000 /* Do not set atime */ +#endif + +#include "../../tizen/src/debug_ch.h" +MULTI_DEBUG_CHANNEL(tizen, 9pfs); + +int open_fd_hw; +int total_open_fd; +static int open_fd_rc; + +enum { + Oread = 0x00, + Owrite = 0x01, + Ordwr = 0x02, + Oexec = 0x03, + Oexcl = 0x04, + Otrunc = 0x10, + Orexec = 0x20, + Orclose = 0x40, + Oappend = 0x80, +}; + +static int omode_to_uflags(int8_t mode) +{ + int ret = 0; + + switch (mode & 3) { + case Oread: + ret = O_RDONLY; + break; + case Ordwr: + ret = O_RDWR; + break; + case Owrite: + ret = O_WRONLY; + break; + case Oexec: + ret = O_RDONLY; + break; + } + + if (mode & Otrunc) { + ret |= O_TRUNC; + } + + if (mode & Oappend) { + ret |= O_APPEND; + } + + if (mode & Oexcl) { + ret |= O_EXCL; + } + + return ret; +} + +struct dotl_openflag_map { + int dotl_flag; + int open_flag; +}; + +static int dotl_to_open_flags(int flags) +{ + int i; + /* + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY + * and P9_DOTL_NOACCESS + */ + int oflags = flags & O_ACCMODE; + + struct dotl_openflag_map dotl_oflag_map[] = { + { P9_DOTL_CREATE, O_CREAT }, + { P9_DOTL_EXCL, O_EXCL }, + { P9_DOTL_NOCTTY , O_NOCTTY }, + { P9_DOTL_TRUNC, O_TRUNC }, + { P9_DOTL_APPEND, O_APPEND }, + { P9_DOTL_NONBLOCK, O_NONBLOCK } , + { 
P9_DOTL_DSYNC, O_DSYNC }, + { P9_DOTL_FASYNC, FASYNC }, + { P9_DOTL_DIRECT, O_DIRECT }, + { P9_DOTL_LARGEFILE, O_LARGEFILE }, + { P9_DOTL_DIRECTORY, O_DIRECTORY }, + { P9_DOTL_NOFOLLOW, O_NOFOLLOW }, + { P9_DOTL_NOATIME, O_NOATIME }, + { P9_DOTL_SYNC, O_SYNC }, + }; + + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { + if (flags & dotl_oflag_map[i].dotl_flag) { + oflags |= dotl_oflag_map[i].open_flag; + } + } + + return oflags; +} + +void cred_init(FsCred *credp) +{ + credp->fc_uid = -1; + credp->fc_gid = -1; + credp->fc_mode = -1; + credp->fc_rdev = -1; +} + +static int get_dotl_openflags(V9fsState *s, int oflags) +{ + int flags; + /* + * Filter the client open flags + */ + flags = dotl_to_open_flags(oflags); + flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT); + /* + * Ignore direct disk access hint until the server supports it. + */ + flags &= ~O_DIRECT; + + return flags; +} + +void v9fs_path_init(V9fsPath *path) +{ + path->data = NULL; + path->size = 0; +} + +void v9fs_path_free(V9fsPath *path) +{ + g_free(path->data); + path->data = NULL; + path->size = 0; +} + +void v9fs_path_copy(V9fsPath *lhs, V9fsPath *rhs) +{ + v9fs_path_free(lhs); + lhs->data = g_malloc(rhs->size); + memcpy(lhs->data, rhs->data, rhs->size); + lhs->size = rhs->size; +} + +int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath, + const char *name, V9fsPath *path) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err; + err = s->ops->name_to_path(&s->ctx, dirpath, name, path); // local_name_to_path + if (err < 0) { + err = -errno; + } + return err; +} + +/* + * Return TRUE if s1 is an ancestor of s2. + * + * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d". + * As a special case, We treat s1 as ancestor of s2 if they are same! 
+ */ +static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2) +{ + if (!strncmp(s1->data, s2->data, s1->size - 1)) { +#ifndef CONFIG_WIN32 + if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') { +#else + if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '\\') { +#endif + return 1; + } + } + return 0; +} + +static size_t v9fs_string_size(V9fsString *str) +{ + return str->size; +} + +/* + * returns 0 if fid got re-opened, 1 if not, < 0 on error */ +static int v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err = 1; + if (f->fid_type == P9_FID_FILE) { + if (f->fs.fd == -1) { + do { + err = v9fs_co_open(pdu, f, f->open_flags); + } while (err == -EINTR && !pdu->cancelled); + } + } else if (f->fid_type == P9_FID_DIR) { + if (f->fs.dir == NULL) { + do { + err = v9fs_co_opendir(pdu, f); + } while (err == -EINTR && !pdu->cancelled); + } + } + return err; +} + +static V9fsFidState *get_fid(V9fsPDU *pdu, int32_t fid) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err; + V9fsFidState *f; + V9fsState *s = pdu->s; + + for (f = s->fid_list; f; f = f->next) { + BUG_ON(f->clunked); + if (f->fid == fid) { + /* + * Update the fid ref upfront so that + * we don't get reclaimed when we yield + * in open later. + */ + f->ref++; + /* + * check whether we need to reopen the + * file. We might have closed the fd + * while trying to free up some file + * descriptors. 
+ */ + err = v9fs_reopen_fid(pdu, f); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + f->ref--; + return NULL; + } + /* + * Mark the fid as referenced so that the LRU + * reclaim won't close the file descriptor + */ + f->flags |= FID_REFERENCED; + return f; + } + } + return NULL; +} + +static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid) +{ + V9fsFidState *f; + + for (f = s->fid_list; f; f = f->next) { + /* If fid is already there return NULL */ + BUG_ON(f->clunked); + if (f->fid == fid) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + return NULL; + } + } + f = g_malloc0(sizeof(V9fsFidState)); + f->fid = fid; + f->fid_type = P9_FID_NONE; + f->ref = 1; + /* + * Mark the fid as referenced so that the LRU + * reclaim won't close the file descriptor + */ + f->flags |= FID_REFERENCED; + f->next = s->fid_list; + s->fid_list = f; + + return f; +} + +#ifndef CONFIG_WIN32 +static int v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int retval = 0; + + if (fidp->fs.xattr.copied_len == -1) { + /* getxattr/listxattr fid */ + goto free_value; + } + /* + * if this is fid for setxattr. 
clunk should + * result in setxattr localcall + */ + if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) { + /* clunk after partial write */ + retval = -EINVAL; + goto free_out; + } + if (fidp->fs.xattr.len) { + retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name, + fidp->fs.xattr.value, + fidp->fs.xattr.len, + fidp->fs.xattr.flags); + } else { + retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name); + } +free_out: + v9fs_string_free(&fidp->fs.xattr.name); +free_value: + if (fidp->fs.xattr.value) { + g_free(fidp->fs.xattr.value); + } + return retval; +} +#endif + +static int free_fid(V9fsPDU *pdu, V9fsFidState *fidp) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int retval = 0; + + if (fidp->fid_type == P9_FID_FILE) { + /* If we reclaimed the fd no need to close */ + if (fidp->fs.fd != -1) { + retval = v9fs_co_close(pdu, &fidp->fs); + } + } else if (fidp->fid_type == P9_FID_DIR) { + if (fidp->fs.dir != NULL) { + retval = v9fs_co_closedir(pdu, &fidp->fs); + } + } else if (fidp->fid_type == P9_FID_XATTR) { +#ifndef CONFIG_WIN32 + retval = v9fs_xattr_fid_clunk(pdu, fidp); +#else + retval = -1; +#endif + } + v9fs_path_free(&fidp->path); + g_free(fidp); + return retval; +} + +static int put_fid(V9fsPDU *pdu, V9fsFidState *fidp) +{ + BUG_ON(!fidp->ref); + fidp->ref--; + /* + * Don't free the fid if it is in reclaim list + */ + if (!fidp->ref && fidp->clunked) { + if (fidp->fid == pdu->s->root_fid) { + /* + * if the clunked fid is root fid then we + * have unmounted the fs on the client side. + * delete the migration blocker. 
Ideally, this + * should be hooked to transport close notification + */ + if (pdu->s->migration_blocker) { + migrate_del_blocker(pdu->s->migration_blocker); + error_free(pdu->s->migration_blocker); + pdu->s->migration_blocker = NULL; + } + } + return free_fid(pdu, fidp); + } + return 0; +} + +static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid) +{ + V9fsFidState **fidpp, *fidp; + + for (fidpp = &s->fid_list; *fidpp; fidpp = &(*fidpp)->next) { + if ((*fidpp)->fid == fid) { + break; + } + } + if (*fidpp == NULL) { + return NULL; + } + fidp = *fidpp; + *fidpp = fidp->next; + fidp->clunked = 1; + return fidp; +} + +void v9fs_reclaim_fd(V9fsPDU *pdu) +{ + int reclaim_count = 0; + V9fsState *s = pdu->s; + V9fsFidState *f, *reclaim_list = NULL; + + for (f = s->fid_list; f; f = f->next) { + /* + * Unlink fids cannot be reclaimed. Check + * for them and skip them. Also skip fids + * currently being operated on. + */ + if (f->ref || f->flags & FID_NON_RECLAIMABLE) { + continue; + } + /* + * if it is a recently referenced fid + * we leave the fid untouched and clear the + * reference bit. We come back to it later + * in the next iteration. (a simple LRU without + * moving list elements around) + */ + if (f->flags & FID_REFERENCED) { + f->flags &= ~FID_REFERENCED; + continue; + } + /* + * Add fids to reclaim list. 
+ */ + if (f->fid_type == P9_FID_FILE) { + if (f->fs.fd != -1) { + /* + * Up the reference count so that + * a clunk request won't free this fid + */ + f->ref++; + f->rclm_lst = reclaim_list; + reclaim_list = f; + f->fs_reclaim.fd = f->fs.fd; + f->fs.fd = -1; + reclaim_count++; + } + } else if (f->fid_type == P9_FID_DIR) { + if (f->fs.dir != NULL) { + /* + * Up the reference count so that + * a clunk request won't free this fid + */ + f->ref++; + f->rclm_lst = reclaim_list; + reclaim_list = f; + f->fs_reclaim.dir = f->fs.dir; + f->fs.dir = NULL; + reclaim_count++; + } + } + if (reclaim_count >= open_fd_rc) { + break; + } + } + /* + * Now close the fid in reclaim list. Free them if they + * are already clunked. + */ + while (reclaim_list) { + f = reclaim_list; + reclaim_list = f->rclm_lst; + if (f->fid_type == P9_FID_FILE) { + v9fs_co_close(pdu, &f->fs_reclaim); + } else if (f->fid_type == P9_FID_DIR) { + v9fs_co_closedir(pdu, &f->fs_reclaim); + } + f->rclm_lst = NULL; + /* + * Now drop the fid reference, free it + * if clunked. + */ + put_fid(pdu, f); + } +} + +static int v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path) +{ + TRACE("[%d][ >> %s]\n", __LINE__, __func__); + int err; + V9fsState *s = pdu->s; + V9fsFidState *fidp, head_fid; + + head_fid.next = s->fid_list; + for (fidp = s->fid_list; fidp; fidp = fidp->next) { + if (fidp->path.size != path->size) { + continue; + } + if (!memcmp(fidp->path.data, path->data, path->size)) { + /* Mark the fid non reclaimable. 
*/ + fidp->flags |= FID_NON_RECLAIMABLE; + + /* reopen the file/dir if already closed */ + err = v9fs_reopen_fid(pdu, fidp); + if (err < 0) { + return -1; + } + /* + * Go back to head of fid list because + * the list could have got updated when + * switched to the worker thread + */ + if (err == 0) { + fidp = &head_fid; + } + } + } + return 0; +} + +static void virtfs_reset(V9fsPDU *pdu) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsState *s = pdu->s; + V9fsFidState *fidp = NULL; + + /* Free all fids */ + while (s->fid_list) { + fidp = s->fid_list; + s->fid_list = fidp->next; + + if (fidp->ref) { + fidp->clunked = 1; + } else { + free_fid(pdu, fidp); + } + } + if (fidp) { + /* One or more unclunked fids found... */ + error_report("9pfs:%s: One or more uncluncked fids " + "found during reset", __func__); + } + return; +} + +#define P9_QID_TYPE_DIR 0x80 +#define P9_QID_TYPE_SYMLINK 0x02 + +#define P9_STAT_MODE_DIR 0x80000000 +#define P9_STAT_MODE_APPEND 0x40000000 +#define P9_STAT_MODE_EXCL 0x20000000 +#define P9_STAT_MODE_MOUNT 0x10000000 +#define P9_STAT_MODE_AUTH 0x08000000 +#define P9_STAT_MODE_TMP 0x04000000 +#define P9_STAT_MODE_SYMLINK 0x02000000 +#define P9_STAT_MODE_LINK 0x01000000 +#define P9_STAT_MODE_DEVICE 0x00800000 +#define P9_STAT_MODE_NAMED_PIPE 0x00200000 +#define P9_STAT_MODE_SOCKET 0x00100000 +#define P9_STAT_MODE_SETUID 0x00080000 +#define P9_STAT_MODE_SETGID 0x00040000 +#define P9_STAT_MODE_SETVTX 0x00010000 + +#define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR | \ + P9_STAT_MODE_SYMLINK | \ + P9_STAT_MODE_LINK | \ + P9_STAT_MODE_DEVICE | \ + P9_STAT_MODE_NAMED_PIPE | \ + P9_STAT_MODE_SOCKET) + +/* This is the algorithm from ufs in spfs */ +static void stat_to_qid(const struct stat *stbuf, V9fsQID *qidp) +{ + size_t size; + + memset(&qidp->path, 0, sizeof(qidp->path)); + size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path)); + memcpy(&qidp->path, &stbuf->st_ino, size); + qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8); + 
qidp->type = 0; + if (S_ISDIR(stbuf->st_mode)) { + qidp->type |= P9_QID_TYPE_DIR; + } +#ifndef CONFIG_WIN32 + if (S_ISLNK(stbuf->st_mode)) { +#else + if (((stbuf->st_mode) & S_IFMT) == 0xA000) { +#endif + qidp->type |= P9_QID_TYPE_SYMLINK; + } +} + +static int fid_to_qid(V9fsPDU *pdu, V9fsFidState *fidp, V9fsQID *qidp) +{ + TRACE("[%d][ >> %s]\n", __LINE__, __func__); + struct stat stbuf; + int err; + + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + return err; + } + stat_to_qid(&stbuf, qidp); + return 0; +} + +static V9fsPDU *alloc_pdu(V9fsState *s) +{ + V9fsPDU *pdu = NULL; + + if (!QLIST_EMPTY(&s->free_list)) { + pdu = QLIST_FIRST(&s->free_list); + QLIST_REMOVE(pdu, next); + QLIST_INSERT_HEAD(&s->active_list, pdu, next); + } + return pdu; +} + +static void free_pdu(V9fsState *s, V9fsPDU *pdu) +{ + if (pdu) { + /* + * Cancelled pdu are added back to the freelist + * by flush request . + */ + if (!pdu->cancelled) { + QLIST_REMOVE(pdu, next); + QLIST_INSERT_HEAD(&s->free_list, pdu, next); + } + } +} + +/* + * We don't do error checking for pdu_marshal/unmarshal here + * because we always expect to have enough space to encode + * error details + */ +static void complete_pdu(V9fsState *s, V9fsPDU *pdu, ssize_t len) +{ + int8_t id = pdu->id + 1; /* Response */ + + if (len < 0) { + WARN("[%d][ >> %s]\n", __LINE__, __func__); + int err = -len; + len = 7; + + if (s->proto_version != V9FS_PROTO_2000L) { + V9fsString str; + + str.data = strerror(err); + str.size = strlen(str.data); + + len += pdu_marshal(pdu, len, "s", &str); + id = P9_RERROR; + } + + len += pdu_marshal(pdu, len, "d", err); + + if (s->proto_version == V9FS_PROTO_2000L) { + id = P9_RLERROR; + } + trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */ + } + + /* fill out the header */ + pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag); + + /* keep these in sync */ + pdu->size = len; + pdu->id = id; + + /* push onto queue and notify */ + virtqueue_push(s->vq, &pdu->elem, len); + 
+ /* FIXME: we should batch these completions */ + virtio_notify(VIRTIO_DEVICE(s), s->vq); + + /* Now wakeup anybody waiting in flush for this request */ + qemu_co_queue_next(&pdu->complete); + + free_pdu(s, pdu); +} + +#ifndef CONFIG_WIN32 +static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension) +{ + mode_t ret; + + ret = mode & 0777; + if (mode & P9_STAT_MODE_DIR) { + ret |= S_IFDIR; + } + + if (mode & P9_STAT_MODE_SYMLINK) { + ret |= S_IFLNK; + } + if (mode & P9_STAT_MODE_SOCKET) { + ret |= S_IFSOCK; + } + if (mode & P9_STAT_MODE_NAMED_PIPE) { + ret |= S_IFIFO; + } + if (mode & P9_STAT_MODE_DEVICE) { + if (extension->size && extension->data[0] == 'c') { + ret |= S_IFCHR; + } else { + ret |= S_IFBLK; + } + } + + if (!(ret&~0777)) { + ret |= S_IFREG; + } + + if (mode & P9_STAT_MODE_SETUID) { + ret |= S_ISUID; + } + if (mode & P9_STAT_MODE_SETGID) { + ret |= S_ISGID; + } + if (mode & P9_STAT_MODE_SETVTX) { + ret |= S_ISVTX; + } + return ret; +} +#endif + +static int donttouch_stat(V9fsStat *stat) +{ + if (stat->type == -1 && + stat->dev == -1 && + stat->qid.type == -1 && + stat->qid.version == -1 && + stat->qid.path == -1 && + stat->mode == -1 && + stat->atime == -1 && + stat->mtime == -1 && + stat->length == -1 && + !stat->name.size && + !stat->uid.size && + !stat->gid.size && + !stat->muid.size && + stat->n_uid == -1 && + stat->n_gid == -1 && + stat->n_muid == -1) { + return 1; + } + + return 0; +} + +static void v9fs_stat_init(V9fsStat *stat) +{ + v9fs_string_init(&stat->name); + v9fs_string_init(&stat->uid); + v9fs_string_init(&stat->gid); + v9fs_string_init(&stat->muid); + v9fs_string_init(&stat->extension); +} + +static void v9fs_stat_free(V9fsStat *stat) +{ + v9fs_string_free(&stat->name); + v9fs_string_free(&stat->uid); + v9fs_string_free(&stat->gid); + v9fs_string_free(&stat->muid); + v9fs_string_free(&stat->extension); +} + +static uint32_t stat_to_v9mode(const struct stat *stbuf) +{ + uint32_t mode; + + mode = stbuf->st_mode & 0777; +#ifndef 
CONFIG_WIN32 + if (S_ISDIR(stbuf->st_mode)) { + mode |= P9_STAT_MODE_DIR; + } + + if (S_ISLNK(stbuf->st_mode)) { + mode |= P9_STAT_MODE_SYMLINK; + } + + if (S_ISSOCK(stbuf->st_mode)) { + mode |= P9_STAT_MODE_SOCKET; + } + + if (S_ISFIFO(stbuf->st_mode)) { + mode |= P9_STAT_MODE_NAMED_PIPE; + } + + if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) { + mode |= P9_STAT_MODE_DEVICE; + } + + if (stbuf->st_mode & S_ISUID) { + mode |= P9_STAT_MODE_SETUID; + } + + if (stbuf->st_mode & S_ISGID) { + mode |= P9_STAT_MODE_SETGID; + } + + if (stbuf->st_mode & S_ISVTX) { + mode |= P9_STAT_MODE_SETVTX; + } +#endif + return mode; +} + +static int stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name, + const struct stat *stbuf, + V9fsStat *v9stat) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err; + const char *str; + + memset(v9stat, 0, sizeof(*v9stat)); + + stat_to_qid(stbuf, &v9stat->qid); + v9stat->mode = stat_to_v9mode(stbuf); + v9stat->atime = stbuf->st_atime; + v9stat->mtime = stbuf->st_mtime; + v9stat->length = stbuf->st_size; + + v9fs_string_null(&v9stat->uid); + v9fs_string_null(&v9stat->gid); + v9fs_string_null(&v9stat->muid); + + v9stat->n_uid = stbuf->st_uid; + v9stat->n_gid = stbuf->st_gid; + v9stat->n_muid = 0; + + v9fs_string_null(&v9stat->extension); + + if (v9stat->mode & P9_STAT_MODE_SYMLINK) { + err = v9fs_co_readlink(pdu, name, &v9stat->extension); + if (err < 0) { + return err; + } + } else if (v9stat->mode & P9_STAT_MODE_DEVICE) { + v9fs_string_sprintf(&v9stat->extension, "%c %u %u", + S_ISCHR(stbuf->st_mode) ? 
'c' : 'b', + major(stbuf->st_rdev), minor(stbuf->st_rdev)); + } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) { + v9fs_string_sprintf(&v9stat->extension, "%s %lu", + "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink); + } + + str = strrchr(name->data, '/'); + if (str) { + str += 1; + } else { + str = name->data; + } + + v9fs_string_sprintf(&v9stat->name, "%s", str); + + v9stat->size = 61 + + v9fs_string_size(&v9stat->name) + + v9fs_string_size(&v9stat->uid) + + v9fs_string_size(&v9stat->gid) + + v9fs_string_size(&v9stat->muid) + + v9fs_string_size(&v9stat->extension); + return 0; +} + +#define P9_STATS_MODE 0x00000001ULL +#define P9_STATS_NLINK 0x00000002ULL +#define P9_STATS_UID 0x00000004ULL +#define P9_STATS_GID 0x00000008ULL +#define P9_STATS_RDEV 0x00000010ULL +#define P9_STATS_ATIME 0x00000020ULL +#define P9_STATS_MTIME 0x00000040ULL +#define P9_STATS_CTIME 0x00000080ULL +#define P9_STATS_INO 0x00000100ULL +#define P9_STATS_SIZE 0x00000200ULL +#define P9_STATS_BLOCKS 0x00000400ULL + +#define P9_STATS_BTIME 0x00000800ULL +#define P9_STATS_GEN 0x00001000ULL +#define P9_STATS_DATA_VERSION 0x00002000ULL + +#define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ +#define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ + + +static void stat_to_v9stat_dotl(V9fsState *s, const struct stat *stbuf, + V9fsStatDotl *v9lstat) +{ + memset(v9lstat, 0, sizeof(*v9lstat)); + + v9lstat->st_mode = stbuf->st_mode; + v9lstat->st_nlink = stbuf->st_nlink; + v9lstat->st_uid = stbuf->st_uid; + v9lstat->st_gid = stbuf->st_gid; + v9lstat->st_rdev = stbuf->st_rdev; + v9lstat->st_size = stbuf->st_size; +#ifndef CONFIG_WIN32 + v9lstat->st_blksize = stbuf->st_blksize; + v9lstat->st_blocks = stbuf->st_blocks; +#else + v9lstat->st_blksize = hostBytesPerSector; + v9lstat->st_blocks = (uint64_t)(v9lstat->st_size / 512 + + (v9lstat->st_size % 512 ? 
1 : 0)); //round up +#endif +#ifdef CONFIG_LINUX + v9lstat->st_atime_sec = stbuf->st_atime; + v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec; + v9lstat->st_mtime_sec = stbuf->st_mtime; + v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec; + v9lstat->st_ctime_sec = stbuf->st_ctime; + v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec; +#else // darwin +#ifndef CONFIG_WIN32 + v9lstat->st_atime_sec = stbuf->st_atimespec.tv_sec; + v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec; + v9lstat->st_mtime_sec = stbuf->st_mtimespec.tv_sec; + v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec; + v9lstat->st_ctime_sec = stbuf->st_ctimespec.tv_sec; + v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec; +#else + v9lstat->st_atime_sec = stbuf->st_atime; + v9lstat->st_atime_nsec = 0; + v9lstat->st_mtime_sec = stbuf->st_mtime; + v9lstat->st_mtime_nsec = 0; + v9lstat->st_ctime_sec = stbuf->st_ctime; + v9lstat->st_ctime_nsec = 0; +#endif +#endif + + /* Currently we only support BASIC fields in stat */ + v9lstat->st_result_mask = P9_STATS_BASIC; + + stat_to_qid(stbuf, &v9lstat->qid); +} + +static void print_sg(struct iovec *sg, int cnt) +{ + int i; + + printf("sg[%d]: {", cnt); + for (i = 0; i < cnt; i++) { + if (i) { + printf(", "); + } + printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len); + } + printf("}\n"); +} + +/* Will call this only for path name based fid */ +static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len) +{ + V9fsPath str; + v9fs_path_init(&str); + v9fs_path_copy(&str, dst); + v9fs_string_sprintf((V9fsString *)dst, "%s%s", src->data, str.data+len); + v9fs_path_free(&str); + /* +1 to include terminating NULL */ + dst->size++; +} + +static inline bool is_ro_export(FsContext *ctx) +{ + return ctx->export_flags & V9FS_RDONLY; +} + +static void v9fs_version(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + ssize_t err; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + V9fsString version; + size_t offset = 7; + + 
v9fs_string_init(&version); + err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version); + if (err < 0) { + offset = err; + goto out; + } + trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data); + + virtfs_reset(pdu); + + if (!strcmp(version.data, "9P2000.u")) { + s->proto_version = V9FS_PROTO_2000U; + } else if (!strcmp(version.data, "9P2000.L")) { + s->proto_version = V9FS_PROTO_2000L; + } else { + v9fs_string_sprintf(&version, "unknown"); + } + + err = pdu_marshal(pdu, offset, "ds", s->msize, &version); + if (err < 0) { + offset = err; + goto out; + } + offset += err; + trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data); +out: + complete_pdu(s, pdu, offset); + v9fs_string_free(&version); + return; +} + +static void v9fs_attach(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + int32_t fid, afid, n_uname; + V9fsString uname, aname; + V9fsFidState *fidp; + size_t offset = 7; + V9fsQID qid; + ssize_t err; + + v9fs_string_init(&uname); + v9fs_string_init(&aname); + err = pdu_unmarshal(pdu, offset, "ddssd", &fid, + &afid, &uname, &aname, &n_uname); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data); + + fidp = alloc_fid(s, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + fidp->uid = n_uname; +#ifndef CONFIG_WIN32 + err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path); +#else + err = v9fs_co_name_to_path(pdu, NULL, "\\", &fidp->path); +#endif + if (err < 0) { + err = -EINVAL; + clunk_fid(s, fid); + goto out; + } + err = fid_to_qid(pdu, fidp, &qid); + if (err < 0) { + err = -EINVAL; + clunk_fid(s, fid); + goto out; + } + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + clunk_fid(s, fid); + goto out; + } + err += offset; + trace_v9fs_attach_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); + /* + * disable migration if we haven't done already. 
+ * attach could get called multiple times for the same export. + */ + if (!s->migration_blocker) { + s->root_fid = fid; - error_set(&s->migration_blocker, QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION, - s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag); ++ error_setg(&s->migration_blocker, ++ "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'", ++ s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag); + migrate_add_blocker(s->migration_blocker); + } +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); + v9fs_string_free(&uname); + v9fs_string_free(&aname); +} + +static void v9fs_stat(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + V9fsStat v9stat; + ssize_t err = 0; + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_stat(pdu->tag, pdu->id, fid); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + goto out; + } + err = stat_to_v9stat(pdu, &fidp->path, &stbuf, &v9stat); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "wS", 0, &v9stat); + if (err < 0) { + v9fs_stat_free(&v9stat); + goto out; + } + trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode, + v9stat.atime, v9stat.mtime, v9stat.length); + err += offset; + v9fs_stat_free(&v9stat); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); +} + +static void v9fs_getattr(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + size_t offset = 7; + ssize_t retval = 0; + struct stat stbuf; + V9fsFidState *fidp; + uint64_t request_mask; + V9fsStatDotl v9stat_dotl; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + retval = pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask); + if (retval < 0) { + 
goto out_nofid; + } + trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + retval = -ENOENT; + goto out_nofid; + } + /* + * Currently we only support BASIC fields in stat, so there is no + * need to look at request_mask. + */ + retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (retval < 0) { + goto out; + } + stat_to_v9stat_dotl(s, &stbuf, &v9stat_dotl); + + /* fill st_gen if requested and supported by underlying fs */ + if (request_mask & P9_STATS_GEN) { + retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl); + if (retval < 0) { + goto out; + } + v9stat_dotl.st_result_mask |= P9_STATS_GEN; + } + retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl); + if (retval < 0) { + goto out; + } + retval += offset; + trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask, + v9stat_dotl.st_mode, v9stat_dotl.st_uid, + v9stat_dotl.st_gid); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, retval); +} + +/* Attribute flags */ +#define P9_ATTR_MODE (1 << 0) +#define P9_ATTR_UID (1 << 1) +#define P9_ATTR_GID (1 << 2) +#define P9_ATTR_SIZE (1 << 3) +#define P9_ATTR_ATIME (1 << 4) +#define P9_ATTR_MTIME (1 << 5) +#define P9_ATTR_CTIME (1 << 6) +#define P9_ATTR_ATIME_SET (1 << 7) +#define P9_ATTR_MTIME_SET (1 << 8) + +#define P9_ATTR_MASK 127 + +static void v9fs_setattr(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err = 0; + int32_t fid; + V9fsFidState *fidp; + size_t offset = 7; + V9fsIattr v9iattr; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr); + if (err < 0) { + goto out_nofid; + } + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + if (v9iattr.valid & P9_ATTR_MODE) { +#ifndef CONFIG_WIN32 + err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode); +#else + err = -EPERM; +#endif + if (err < 0) { + goto out; + } + } + if (v9iattr.valid & 
(P9_ATTR_ATIME | P9_ATTR_MTIME)) { + struct timespec times[2]; + if (v9iattr.valid & P9_ATTR_ATIME) { + if (v9iattr.valid & P9_ATTR_ATIME_SET) { + times[0].tv_sec = v9iattr.atime_sec; + times[0].tv_nsec = v9iattr.atime_nsec; + } else { + times[0].tv_nsec = UTIME_NOW; + } + } else { + times[0].tv_nsec = UTIME_OMIT; + } + if (v9iattr.valid & P9_ATTR_MTIME) { + if (v9iattr.valid & P9_ATTR_MTIME_SET) { + times[1].tv_sec = v9iattr.mtime_sec; + times[1].tv_nsec = v9iattr.mtime_nsec; + } else { + times[1].tv_nsec = UTIME_NOW; + } + } else { + times[1].tv_nsec = UTIME_OMIT; + } + err = v9fs_co_utimensat(pdu, &fidp->path, times); + if (err < 0) { + goto out; + } + } + /* + * If the only valid entry in iattr is ctime we can call + * chown(-1,-1) to update the ctime of the file + */ + if ((v9iattr.valid & (P9_ATTR_UID | P9_ATTR_GID)) || + ((v9iattr.valid & P9_ATTR_CTIME) + && !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME))) { + if (!(v9iattr.valid & P9_ATTR_UID)) { + v9iattr.uid = -1; + } + if (!(v9iattr.valid & P9_ATTR_GID)) { + v9iattr.gid = -1; + } +#ifndef CONFIG_WIN32 + err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid, + v9iattr.gid); +#else + err = -EPERM; +#endif + if (err < 0) { + goto out; + } + } + if (v9iattr.valid & (P9_ATTR_SIZE)) { + err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size); + if (err < 0) { + goto out; + } + } + err = offset; +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); +} + +static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids) +{ + int i; + ssize_t err; + size_t offset = 7; + + err = pdu_marshal(pdu, offset, "w", nwnames); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + return err; + } + offset += err; + for (i = 0; i < nwnames; i++) { + err = pdu_marshal(pdu, offset, "Q", &qids[i]); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + return err; + } + offset += err; + } + return offset; +} + +static void v9fs_walk(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", 
__LINE__, __func__); + int name_idx; + V9fsQID *qids = NULL; + int i, err = 0; + V9fsPath dpath, path; + uint16_t nwnames; + struct stat stbuf; + size_t offset = 7; + int32_t fid, newfid; + V9fsString *wnames = NULL; + V9fsFidState *fidp; + V9fsFidState *newfidp = NULL; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + complete_pdu(s, pdu, err); + return ; + } + offset += err; + + trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames); + + if (nwnames && nwnames <= P9_MAXWELEM) { + wnames = g_malloc0(sizeof(wnames[0]) * nwnames); + qids = g_malloc0(sizeof(qids[0]) * nwnames); + for (i = 0; i < nwnames; i++) { + err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + offset += err; + } + } else if (nwnames > P9_MAXWELEM) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out_nofid; + } + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -ENOENT; + goto out_nofid; + } + v9fs_path_init(&dpath); + v9fs_path_init(&path); + /* + * Both dpath and path initially poin to fidp. 
+ * Needed to handle request with nwnames == 0 + */ + v9fs_path_copy(&dpath, &fidp->path); + v9fs_path_copy(&path, &fidp->path); + for (name_idx = 0; name_idx < nwnames; name_idx++) { + err = v9fs_co_name_to_path(pdu, &dpath, wnames[name_idx].data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_lstat(pdu, &path, &stbuf); + if (err < 0) { + goto out; + } + stat_to_qid(&stbuf, &qids[name_idx]); + v9fs_path_copy(&dpath, &path); + } + if (fid == newfid) { + BUG_ON(fidp->fid_type != P9_FID_NONE); + WARN("[%d][ >> %s]\n", __LINE__, __func__); + v9fs_path_copy(&fidp->path, &path); + } else { + newfidp = alloc_fid(s, newfid); + if (newfidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out; + } + newfidp->uid = fidp->uid; + v9fs_path_copy(&newfidp->path, &path); + } + err = v9fs_walk_marshal(pdu, nwnames, qids); + trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids); +out: + put_fid(pdu, fidp); + if (newfidp) { + put_fid(pdu, newfidp); + } + v9fs_path_free(&dpath); + v9fs_path_free(&path); +out_nofid: + complete_pdu(s, pdu, err); + if (nwnames && nwnames <= P9_MAXWELEM) { + for (name_idx = 0; name_idx < nwnames; name_idx++) { + v9fs_string_free(&wnames[name_idx]); + } + g_free(wnames); + g_free(qids); + } + return; +} + +static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path) +{ + struct statfs stbuf; + int32_t iounit = 0; + V9fsState *s = pdu->s; + + /* + * iounit should be multiples of f_bsize (host filesystem block size + * and as well as less than (client msize - P9_IOHDRSZ)) + */ + if (!v9fs_co_statfs(pdu, path, &stbuf)) { + iounit = stbuf.f_bsize; + iounit *= (s->msize - P9_IOHDRSZ)/stbuf.f_bsize; + } + if (!iounit) { + iounit = s->msize - P9_IOHDRSZ; + } + return iounit; +} + +static void v9fs_open(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int flags; + int32_t fid; + int32_t mode; + V9fsQID qid; + int iounit = 0; + ssize_t err = 0; + size_t 
offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + if (s->proto_version == V9FS_PROTO_2000L) { + err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode); + } else { + uint8_t modebyte; + err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte); + mode = modebyte; + } + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + trace_v9fs_open(pdu->tag, pdu->id, fid, mode); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -ENOENT; + goto out_nofid; + } + BUG_ON(fidp->fid_type != P9_FID_NONE); + + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + stat_to_qid(&stbuf, &qid); + if (S_ISDIR(stbuf.st_mode)) { + err = v9fs_co_opendir(pdu, fidp); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + fidp->fid_type = P9_FID_DIR; + err = pdu_marshal(pdu, offset, "Qd", &qid, 0); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset; + } else { + if (s->proto_version == V9FS_PROTO_2000L) { + flags = get_dotl_openflags(s, mode); + } else { + flags = omode_to_uflags(mode); + } + if (is_ro_export(&s->ctx)) { + if (mode & O_WRONLY || mode & O_RDWR || + mode & O_APPEND || mode & O_TRUNC) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EROFS; + goto out; + } + } + err = v9fs_co_open(pdu, fidp, flags); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + fidp->fid_type = P9_FID_FILE; + fidp->open_flags = flags; + if (flags & O_EXCL) { + /* + * We let the host file system do O_EXCL check + * We should not reclaim such fd + */ + fidp->flags |= FID_NON_RECLAIMABLE; + } + iounit = get_iounit(pdu, &fidp->path); + err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset; + } + 
trace_v9fs_open_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); +} + +static void v9fs_lcreate(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t dfid, flags, mode; + gid_t gid; + ssize_t err = 0; + ssize_t offset = 7; + V9fsString name; + V9fsFidState *fidp; + struct stat stbuf; + V9fsQID qid; + int32_t iounit; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsddd", &dfid, + &name, &flags, &mode, &gid); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid); + + fidp = get_fid(pdu, dfid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -ENOENT; + goto out_nofid; + } + + flags = get_dotl_openflags(pdu->s, flags); + err = v9fs_co_open2(pdu, fidp, &name, gid, + flags | O_CREAT, mode, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + fidp->fid_type = P9_FID_FILE; + fidp->open_flags = flags; + if (flags & O_EXCL) { + /* + * We let the host file system do O_EXCL check + * We should not reclaim such fd + */ + fidp->flags |= FID_NON_RECLAIMABLE; + } + iounit = get_iounit(pdu, &fidp->path); + stat_to_qid(&stbuf, &qid); + err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset; + trace_v9fs_lcreate_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(pdu->s, pdu, err); + v9fs_string_free(&name); +} + +static void v9fs_fsync(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err; + int32_t fid; + int datasync; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "dd", &fid, &datasync); + if (err < 0) 
{ + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + err = v9fs_co_fsync(pdu, fidp, datasync); + if (!err) { + err = offset; + } + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); +} + +static void v9fs_clunk(void *opaque) +{ + int err; + int32_t fid; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_clunk(pdu->tag, pdu->id, fid); + + fidp = clunk_fid(s, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + /* + * Bump the ref so that put_fid will + * free the fid. + */ + fidp->ref++; + err = put_fid(pdu, fidp); + if (!err) { + err = offset; + } +out_nofid: + complete_pdu(s, pdu, err); +} + +#ifndef CONFIG_WIN32 +static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp, + uint64_t off, uint32_t max_count) +{ + ssize_t err; + size_t offset = 7; + int read_count; + int64_t xattr_len; + + xattr_len = fidp->fs.xattr.len; + read_count = xattr_len - off; + if (read_count > max_count) { + read_count = max_count; + } else if (read_count < 0) { + /* + * read beyond XATTR value + */ + read_count = 0; + } + err = pdu_marshal(pdu, offset, "d", read_count); + if (err < 0) { + return err; + } + offset += err; + err = v9fs_pack(pdu->elem.in_sg, pdu->elem.in_num, offset, + ((char *)fidp->fs.xattr.value) + off, + read_count); + if (err < 0) { + return err; + } + offset += err; + return offset; +} +#endif + +static int v9fs_do_readdir_with_stat(V9fsPDU *pdu, + V9fsFidState *fidp, uint32_t max_count) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsPath path; + V9fsStat v9stat; + int len, err = 0; + int32_t count = 0; + struct stat stbuf; + off_t saved_dir_pos; + struct dirent 
*dent, *result; + + /* save the directory position */ + saved_dir_pos = v9fs_co_telldir(pdu, fidp); + if (saved_dir_pos < 0) { + return saved_dir_pos; + } + + dent = g_malloc(sizeof(struct dirent)); + + while (1) { + v9fs_path_init(&path); + err = v9fs_co_readdir_r(pdu, fidp, dent, &result); + if (err || !result) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + break; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_lstat(pdu, &path, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = stat_to_v9stat(pdu, &path, &stbuf, &v9stat); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */ + len = pdu_marshal(pdu, 11 + count, "S", &v9stat); + if ((len != (v9stat.size + 2)) || ((count + len) > max_count)) { + /* Ran out of buffer. Set dir back to old position and return */ + v9fs_co_seekdir(pdu, fidp, saved_dir_pos); + v9fs_stat_free(&v9stat); + v9fs_path_free(&path); + g_free(dent); + return count; + } + count += len; + v9fs_stat_free(&v9stat); + v9fs_path_free(&path); +#ifdef CONFIG_LINUX + saved_dir_pos = dent->d_off; +#else + saved_dir_pos = v9fs_co_telldir(pdu, fidp); +#endif + } +out: + g_free(dent); + v9fs_path_free(&path); + if (err < 0) { + return err; + } + return count; +} + +/* + * Create a QEMUIOVector for a sub-region of PDU iovecs + * + * @qiov: uninitialized QEMUIOVector + * @skip: number of bytes to skip from beginning of PDU + * @size: number of bytes to include + * @is_write: true - write, false - read + * + * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up + * with qemu_iovec_destroy(). 
+ */ +static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu, + size_t skip, size_t size, + bool is_write) +{ + QEMUIOVector elem; + struct iovec *iov; + unsigned int niov; + + if (is_write) { + iov = pdu->elem.out_sg; + niov = pdu->elem.out_num; + } else { + iov = pdu->elem.in_sg; + niov = pdu->elem.in_num; + } + + qemu_iovec_init_external(&elem, iov, niov); + qemu_iovec_init(qiov, niov); + qemu_iovec_concat(qiov, &elem, skip, size); +} + +static void v9fs_read(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + uint64_t off; + ssize_t err = 0; + int32_t count = 0; + size_t offset = 7; + uint32_t max_count; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out_nofid; + } + if (fidp->fid_type == P9_FID_DIR) { + + if (off == 0) { + v9fs_co_rewinddir(pdu, fidp); + } + count = v9fs_do_readdir_with_stat(pdu, fidp, max_count); + if (count < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = count; + goto out; + } + err = pdu_marshal(pdu, offset, "d", count); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset + count; + } else if (fidp->fid_type == P9_FID_FILE) { + QEMUIOVector qiov_full; + QEMUIOVector qiov; + int32_t len; + + v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false); + qemu_iovec_init(&qiov, qiov_full.niov); + do { + qemu_iovec_reset(&qiov); + qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count); + if (0) { + print_sg(qiov.iov, qiov.niov); + } + /* Loop in case of EINTR */ + do { + len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off); + if (len >= 0) { + off += len; + count 
+= len; + } + } while (len == -EINTR && !pdu->cancelled); + if (len < 0) { + /* IO error return the error */ + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = len; + goto out; + } + } while (count < max_count && len > 0); + err = pdu_marshal(pdu, offset, "d", count); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset + count; + qemu_iovec_destroy(&qiov); + qemu_iovec_destroy(&qiov_full); +#ifndef CONFIG_WIN32 + } else if (fidp->fid_type == P9_FID_XATTR) { + err = v9fs_xattr_read(s, pdu, fidp, off, max_count); +#endif + } else { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + } + trace_v9fs_read_return(pdu->tag, pdu->id, count, err); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); +} + +static size_t v9fs_readdir_data_size(V9fsString *name) +{ + /* + * Size of each dirent on the wire: size of qid (13) + size of offset (8) + * size of type (1) + size of name.size (2) + strlen(name.data) + */ + return 24 + v9fs_string_size(name); +} + +static int v9fs_do_readdir(V9fsPDU *pdu, + V9fsFidState *fidp, int32_t max_count) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + size_t size; + V9fsQID qid; + V9fsString name; + int len, err = 0; + int32_t count = 0; + off_t saved_dir_pos; + struct dirent *dent, *result; +#ifndef CONFIG_LINUX + uint64_t d_offset = 0; +#endif + + /* save the directory position */ + saved_dir_pos = v9fs_co_telldir(pdu, fidp); + if (saved_dir_pos < 0) { + return saved_dir_pos; + } + + dent = g_malloc(sizeof(struct dirent)); + + while (1) { + err = v9fs_co_readdir_r(pdu, fidp, dent, &result); + dent = result; + if (err) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + break; + } + + if (!result) { + TRACE("[%d][ >> %s] v9fs_co_readdir_r has no result.\n", __LINE__, __func__); + break; + } + + v9fs_string_init(&name); + v9fs_string_sprintf(&name, "%s", dent->d_name); + if ((count + v9fs_readdir_data_size(&name)) > max_count) { + /* Ran out of buffer. 
Set dir back to old position and return */ + v9fs_co_seekdir(pdu, fidp, saved_dir_pos); + v9fs_string_free(&name); + g_free(dent); + return count; + } + /* + * Fill up just the path field of qid because the client uses + * only that. To fill the entire qid structure we will have + * to stat each dirent found, which is expensive + */ + size = MIN(sizeof(dent->d_ino), sizeof(qid.path)); + memcpy(&qid.path, &dent->d_ino, size); + /* Fill the other fields with dummy values */ + qid.type = 0; + qid.version = 0; + + /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */ +#ifdef CONFIG_LINUX + len = pdu_marshal(pdu, 11 + count, "Qqbs", + &qid, dent->d_off, + dent->d_type, &name); +#else + d_offset = v9fs_co_telldir(pdu, fidp); +#ifndef CONFIG_WIN32 + len = pdu_marshal(pdu, 11 + count, "Qqbs", + &qid, d_offset, + dent->d_type, &name); +#else + len = pdu_marshal(pdu, 11 + count, "Qqbs", + &qid, d_offset, + WIN32_D_TYPE, &name); +#endif +#endif + if (len < 0) { + v9fs_co_seekdir(pdu, fidp, saved_dir_pos); + v9fs_string_free(&name); + g_free(dent); + return len; + } + count += len; + v9fs_string_free(&name); +#ifdef CONFIG_LINUX + saved_dir_pos = dent->d_off; +#else + saved_dir_pos = d_offset; +#endif + } + g_free(dent); + if (err < 0) { + return err; + } + return count; +} + +static void v9fs_readdir(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + V9fsFidState *fidp; + ssize_t retval = 0; + size_t offset = 7; + uint64_t initial_offset; + int32_t count; + uint32_t max_count; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + retval = pdu_unmarshal(pdu, offset, "dqd", &fid, + &initial_offset, &max_count); + if (retval < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + retval = -EINVAL; + goto out_nofid; + } + if (!fidp->fs.dir) { + 
ERR("[%d][ >> %s]\n", __LINE__, __func__); + retval = -EINVAL; + goto out; + } + if (initial_offset == 0) { + v9fs_co_rewinddir(pdu, fidp); + } else { + v9fs_co_seekdir(pdu, fidp, initial_offset); + } + count = v9fs_do_readdir(pdu, fidp, max_count); + if (count < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + retval = count; + goto out; + } + retval = pdu_marshal(pdu, offset, "d", count); + if (retval < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + retval += count + offset; + trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, retval); +} + +#ifndef CONFIG_WIN32 +static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp, + uint64_t off, uint32_t count, + struct iovec *sg, int cnt) +{ + int i, to_copy; + ssize_t err = 0; + int write_count; + int64_t xattr_len; + size_t offset = 7; + + + xattr_len = fidp->fs.xattr.len; + write_count = xattr_len - off; + if (write_count > count) { + write_count = count; + } else if (write_count < 0) { + /* + * write beyond XATTR value len specified in + * xattrcreate + */ + err = -ENOSPC; + goto out; + } + err = pdu_marshal(pdu, offset, "d", write_count); + if (err < 0) { + return err; + } + err += offset; + fidp->fs.xattr.copied_len += write_count; + /* + * Now copy the content from sg list + */ + for (i = 0; i < cnt; i++) { + if (write_count > sg[i].iov_len) { + to_copy = sg[i].iov_len; + } else { + to_copy = write_count; + } + memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy); + /* updating vs->off since we are not using below */ + off += to_copy; + write_count -= to_copy; + } +out: + return err; +} +#endif +static void v9fs_write(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + ssize_t err; + int32_t fid; + uint64_t off; + uint32_t count; + int32_t len = 0; + int32_t total = 0; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + 
QEMUIOVector qiov_full; + QEMUIOVector qiov; + + err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &count); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + return complete_pdu(s, pdu, err); + } + offset += err; + v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true); + trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out_nofid; + } + if (fidp->fid_type == P9_FID_FILE) { + if (fidp->fs.fd == -1) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out; + } + } else if (fidp->fid_type == P9_FID_XATTR) { + /* + * setxattr operation + */ +#ifndef CONFIG_WIN32 + err = v9fs_xattr_write(s, pdu, fidp, off, count, + qiov_full.iov, qiov_full.niov); +#endif + goto out; + } else { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out; + } + qemu_iovec_init(&qiov, qiov_full.niov); + do { + qemu_iovec_reset(&qiov); + qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total); + if (0) { + print_sg(qiov.iov, qiov.niov); + } + /* Loop in case of EINTR */ + do { + len = v9fs_co_pwritev(pdu, fidp, qiov.iov, qiov.niov, off); + if (len >= 0) { + off += len; + total += len; + } + } while (len == -EINTR && !pdu->cancelled); + if (len < 0) { + /* IO error return the error */ + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = len; + goto out_qiov; + } + } while (total < count && len > 0); + + offset = 7; + err = pdu_marshal(pdu, offset, "d", total); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset; + trace_v9fs_write_return(pdu->tag, pdu->id, total, err); +out_qiov: + qemu_iovec_destroy(&qiov); +out: + put_fid(pdu, fidp); +out_nofid: + qemu_iovec_destroy(&qiov_full); + complete_pdu(s, pdu, err); +} + +static void v9fs_create(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + int err = 0; + size_t 
offset = 7; + V9fsFidState *fidp; + V9fsQID qid; + int32_t perm; + int8_t mode; + V9fsPath path; + struct stat stbuf; + V9fsString name; + V9fsString extension; + int iounit; + V9fsPDU *pdu = opaque; + + v9fs_path_init(&path); + v9fs_string_init(&name); + v9fs_string_init(&extension); + err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name, + &perm, &mode, &extension); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out_nofid; + } + trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out_nofid; + } + if (perm & P9_STAT_MODE_DIR) { + err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777, + fidp->uid, -1, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + v9fs_path_copy(&fidp->path, &path); + err = v9fs_co_opendir(pdu, fidp); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + fidp->fid_type = P9_FID_DIR; +#ifndef CONFIG_WIN32 + } else if (perm & P9_STAT_MODE_SYMLINK) { + err = v9fs_co_symlink(pdu, fidp, &name, + extension.data, -1 , &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + v9fs_path_copy(&fidp->path, &path); + } else if (perm & P9_STAT_MODE_LINK) { + int32_t ofid = atoi(extension.data); + V9fsFidState *ofidp = get_fid(pdu, ofid); + if (ofidp == NULL) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -EINVAL; + goto out; + } + err = v9fs_co_link(pdu, ofidp, fidp, &name); + put_fid(pdu, ofidp); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, 
name.data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + fidp->fid_type = P9_FID_NONE; + goto out; + } + v9fs_path_copy(&fidp->path, &path); + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + fidp->fid_type = P9_FID_NONE; + goto out; + } + } else if (perm & P9_STAT_MODE_DEVICE) { + char ctype; + uint32_t major, minor; + mode_t nmode = 0; + + if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + err = -errno; + goto out; + } + + switch (ctype) { + case 'c': + nmode = S_IFCHR; + break; + case 'b': + nmode = S_IFBLK; + break; + default: + err = -EIO; + goto out; + } + + nmode |= perm & 0777; + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, + makedev(major, minor), nmode, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + v9fs_path_copy(&fidp->path, &path); + } else if (perm & P9_STAT_MODE_NAMED_PIPE) { + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, + 0, S_IFIFO | (perm & 0777), &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + v9fs_path_copy(&fidp->path, &path); + } else if (perm & P9_STAT_MODE_SOCKET) { + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, + 0, S_IFSOCK | (perm & 0777), &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + v9fs_path_copy(&fidp->path, &path); +#endif + } else { + err = v9fs_co_open2(pdu, fidp, &name, -1, + omode_to_uflags(mode)|O_CREAT, 
perm, &stbuf); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + fidp->fid_type = P9_FID_FILE; + fidp->open_flags = omode_to_uflags(mode); + if (fidp->open_flags & O_EXCL) { + /* + * We let the host file system do O_EXCL check + * We should not reclaim such fd + */ + fidp->flags |= FID_NON_RECLAIMABLE; + } + } + iounit = get_iounit(pdu, &fidp->path); + stat_to_qid(&stbuf, &qid); + err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); + if (err < 0) { + ERR("[%d][ >> %s]\n", __LINE__, __func__); + goto out; + } + err += offset; + trace_v9fs_create_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(pdu->s, pdu, err); + v9fs_string_free(&name); + v9fs_string_free(&extension); + v9fs_path_free(&path); +} + +static void v9fs_symlink(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsPDU *pdu = opaque; + V9fsString name; + V9fsString symname; + V9fsFidState *dfidp; + V9fsQID qid; + struct stat stbuf; + int32_t dfid; + int err = 0; + gid_t gid; + size_t offset = 7; + + v9fs_string_init(&name); + v9fs_string_init(&symname); + err = pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid); + + dfidp = get_fid(pdu, dfid); + if (dfidp == NULL) { + err = -EINVAL; + goto out_nofid; + } +#ifndef CONFIG_WIN32 + err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf); +#else + err = -EPERM; +#endif + if (err < 0) { + goto out; + } + stat_to_qid(&stbuf, &qid); + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_symlink_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); +out: + put_fid(pdu, dfidp); +out_nofid: + complete_pdu(pdu->s, pdu, err); + v9fs_string_free(&name); + v9fs_string_free(&symname); +} + +static void v9fs_flush(void *opaque) +{ + TRACE("[%d][ 
Enter >> %s]\n", __LINE__, __func__); + ssize_t err; + int16_t tag; + size_t offset = 7; + V9fsPDU *cancel_pdu; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "w", &tag); + if (err < 0) { + complete_pdu(s, pdu, err); + return; + } + trace_v9fs_flush(pdu->tag, pdu->id, tag); + + QLIST_FOREACH(cancel_pdu, &s->active_list, next) { + if (cancel_pdu->tag == tag) { + break; + } + } + if (cancel_pdu) { + cancel_pdu->cancelled = 1; + /* + * Wait for pdu to complete. + */ + qemu_co_queue_wait(&cancel_pdu->complete); + cancel_pdu->cancelled = 0; + free_pdu(pdu->s, cancel_pdu); + } + complete_pdu(s, pdu, 7); + return; +} + +static void v9fs_link(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + int32_t dfid, oldfid; + V9fsFidState *dfidp, *oldfidp; + V9fsString name; + size_t offset = 7; + int err = 0; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data); + + dfidp = get_fid(pdu, dfid); + if (dfidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + + oldfidp = get_fid(pdu, oldfid); + if (oldfidp == NULL) { + err = -ENOENT; + goto out; + } + err = v9fs_co_link(pdu, oldfidp, dfidp, &name); + if (!err) { + err = offset; + } +out: + put_fid(pdu, dfidp); +out_nofid: + v9fs_string_free(&name); + complete_pdu(s, pdu, err); +} + +/* Only works with path name based fid */ +static void v9fs_remove(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + int err = 0; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_remove(pdu->tag, pdu->id, fid); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* if fs driver is not path based, return 
EOPNOTSUPP */ + if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { + err = -EOPNOTSUPP; + goto out_err; + } + /* + * IF the file is unlinked, we cannot reopen + * the file later. So don't reclaim fd + */ + err = v9fs_mark_fids_unreclaim(pdu, &fidp->path); + if (err < 0) { + goto out_err; + } + err = v9fs_co_remove(pdu, &fidp->path); + if (!err) { + err = offset; + } +out_err: + /* For TREMOVE we need to clunk the fid even on failed remove */ + clunk_fid(pdu->s, fidp->fid); + put_fid(pdu, fidp); +out_nofid: + complete_pdu(pdu->s, pdu, err); +} + +static void v9fs_unlinkat(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err = 0; + V9fsString name; + int32_t dfid, flags; + size_t offset = 7; + V9fsPath path; + V9fsFidState *dfidp; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags); + if (err < 0) { + goto out_nofid; + } + dfidp = get_fid(pdu, dfid); + if (dfidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* + * IF the file is unlinked, we cannot reopen + * the file later. 
So don't reclaim fd + */ + v9fs_path_init(&path); + err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path); + if (err < 0) { + goto out_err; + } + err = v9fs_mark_fids_unreclaim(pdu, &path); + if (err < 0) { + goto out_err; + } + err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, flags); + if (!err) { + err = offset; + } +out_err: + put_fid(pdu, dfidp); + v9fs_path_free(&path); +out_nofid: + complete_pdu(pdu->s, pdu, err); + v9fs_string_free(&name); +} + +/* Only works with path name based fid */ +static int v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp, + int32_t newdirfid, V9fsString *name) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + char *end; + int err = 0; + V9fsPath new_path; + V9fsFidState *tfidp; + V9fsState *s = pdu->s; + V9fsFidState *dirfidp = NULL; + char *old_name, *new_name; + + v9fs_path_init(&new_path); + if (newdirfid != -1) { + dirfidp = get_fid(pdu, newdirfid); + if (dirfidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + BUG_ON(dirfidp->fid_type != P9_FID_NONE); + v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path); + } else { + old_name = fidp->path.data; + end = strrchr(old_name, '/'); + if (end) { + end++; + } else { + end = old_name; + } + new_name = g_malloc0(end - old_name + name->size + 1); + strncat(new_name, old_name, end - old_name); + strncat(new_name + (end - old_name), name->data, name->size); + v9fs_co_name_to_path(pdu, NULL, new_name, &new_path); + g_free(new_name); + } + err = v9fs_co_rename(pdu, &fidp->path, &new_path); + if (err < 0) { + goto out; + } + /* + * Fixup fid's pointing to the old name to + * start pointing to the new name + */ + for (tfidp = s->fid_list; tfidp; tfidp = tfidp->next) { + if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) { + /* replace the name */ + v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data)); + } + } +out: + if (dirfidp) { + put_fid(pdu, dirfidp); + } + v9fs_path_free(&new_path); +out_nofid: + return err; +} + +/* Only works with 
path name based fid */ +static void v9fs_rename(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + ssize_t err = 0; + size_t offset = 7; + V9fsString name; + int32_t newdirfid; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name); + if (err < 0) { + goto out_nofid; + } + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + BUG_ON(fidp->fid_type != P9_FID_NONE); + /* if fs driver is not path based, return EOPNOTSUPP */ + if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { + err = -EOPNOTSUPP; + goto out; + } + v9fs_path_write_lock(s); + err = v9fs_complete_rename(pdu, fidp, newdirfid, &name); + v9fs_path_unlock(s); + if (!err) { + err = offset; + } +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); + v9fs_string_free(&name); +} + +static void v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir, + V9fsString *old_name, V9fsPath *newdir, + V9fsString *new_name) +{ + V9fsFidState *tfidp; + V9fsPath oldpath, newpath; + V9fsState *s = pdu->s; + + + v9fs_path_init(&oldpath); + v9fs_path_init(&newpath); + v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath); + v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath); + + /* + * Fixup fid's pointing to the old name to + * start pointing to the new name + */ + for (tfidp = s->fid_list; tfidp; tfidp = tfidp->next) { + if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) { + /* replace the name */ + v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data)); + } + } + v9fs_path_free(&oldpath); + v9fs_path_free(&newpath); +} + +static int v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid, + V9fsString *old_name, int32_t newdirfid, + V9fsString *new_name) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int err = 0; + V9fsState *s = pdu->s; + V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL; + + 
olddirfidp = get_fid(pdu, olddirfid); + if (olddirfidp == NULL) { + err = -ENOENT; + goto out; + } + if (newdirfid != -1) { + newdirfidp = get_fid(pdu, newdirfid); + if (newdirfidp == NULL) { + err = -ENOENT; + goto out; + } + } else { + newdirfidp = get_fid(pdu, olddirfid); + } + + err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name, + &newdirfidp->path, new_name); + if (err < 0) { + goto out; + } + if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { + /* Only for path based fid we need to do the below fixup */ + v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name, + &newdirfidp->path, new_name); + } +out: + if (olddirfidp) { + put_fid(pdu, olddirfidp); + } + if (newdirfidp) { + put_fid(pdu, newdirfidp); + } + return err; +} + +static void v9fs_renameat(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + ssize_t err = 0; + size_t offset = 7; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + int32_t olddirfid, newdirfid; + V9fsString old_name, new_name; + + v9fs_string_init(&old_name); + v9fs_string_init(&new_name); + err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid, + &old_name, &newdirfid, &new_name); + if (err < 0) { + goto out_err; + } + + v9fs_path_write_lock(s); + err = v9fs_complete_renameat(pdu, olddirfid, + &old_name, newdirfid, &new_name); + v9fs_path_unlock(s); + if (!err) { + err = offset; + } + +out_err: + complete_pdu(s, pdu, err); + v9fs_string_free(&old_name); + v9fs_string_free(&new_name); +} + +static void v9fs_wstat(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + int err = 0; + int16_t unused; + V9fsStat v9stat; + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_stat_init(&v9stat); + err = pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_wstat(pdu->tag, pdu->id, fid, + v9stat.mode, v9stat.atime, v9stat.mtime); + + fidp = get_fid(pdu, fid); + if 
(fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* do we need to sync the file? */ + if (donttouch_stat(&v9stat)) { + err = v9fs_co_fsync(pdu, fidp, 0); + goto out; + } + if (v9stat.mode != -1) { + uint32_t v9_mode; + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + goto out; + } + v9_mode = stat_to_v9mode(&stbuf); + if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) != + (v9_mode & P9_STAT_MODE_TYPE_BITS)) { + /* Attempting to change the type */ + err = -EIO; + goto out; + } +#ifndef CONFIG_WIN32 + err = v9fs_co_chmod(pdu, &fidp->path, + v9mode_to_mode(v9stat.mode, + &v9stat.extension)); +#else + err = -EPERM; +#endif + if (err < 0) { + goto out; + } + } + if (v9stat.mtime != -1 || v9stat.atime != -1) { + struct timespec times[2]; + if (v9stat.atime != -1) { + times[0].tv_sec = v9stat.atime; + times[0].tv_nsec = 0; + } else { + times[0].tv_nsec = UTIME_OMIT; + } + if (v9stat.mtime != -1) { + times[1].tv_sec = v9stat.mtime; + times[1].tv_nsec = 0; + } else { + times[1].tv_nsec = UTIME_OMIT; + } + err = v9fs_co_utimensat(pdu, &fidp->path, times); + if (err < 0) { + goto out; + } + } + if (v9stat.n_gid != -1 || v9stat.n_uid != -1) { +#ifndef CONFIG_WIN32 + err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid); +#else + err = -EPERM; +#endif + if (err < 0) { + goto out; + } + } + if (v9stat.name.size != 0) { + err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name); + if (err < 0) { + goto out; + } + } + if (v9stat.length != -1) { + err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length); + if (err < 0) { + goto out; + } + } + err = offset; +out: + put_fid(pdu, fidp); +out_nofid: + v9fs_stat_free(&v9stat); + complete_pdu(s, pdu, err); +} + +static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf) +{ + uint32_t f_type; + uint32_t f_bsize; + uint64_t f_blocks; + uint64_t f_bfree; + uint64_t f_bavail; + uint64_t f_files; + uint64_t f_ffree; + uint64_t fsid_val; + uint32_t f_namelen; + size_t offset = 7; + int32_t 
bsize_factor; + + /* + * compute bsize factor based on host file system block size + * and client msize + */ + bsize_factor = (s->msize - P9_IOHDRSZ)/stbuf->f_bsize; + if (!bsize_factor) { + bsize_factor = 1; + } + f_type = stbuf->f_type; + f_bsize = stbuf->f_bsize; + f_bsize *= bsize_factor; + /* + * f_bsize is adjusted(multiplied) by bsize factor, so we need to + * adjust(divide) the number of blocks, free blocks and available + * blocks by bsize factor + */ + f_blocks = stbuf->f_blocks/bsize_factor; + f_bfree = stbuf->f_bfree/bsize_factor; + f_bavail = stbuf->f_bavail/bsize_factor; + f_files = stbuf->f_files; + f_ffree = stbuf->f_ffree; +#ifdef CONFIG_LINUX + fsid_val = (unsigned int) stbuf->f_fsid.__val[0] | + (unsigned long long)stbuf->f_fsid.__val[1] << 32; + f_namelen = stbuf->f_namelen; +#else + fsid_val = (unsigned int) stbuf->f_fsid.val[0] | + (unsigned long long)stbuf->f_fsid.val[1] << 32; + f_namelen = 255; +#endif + + return pdu_marshal(pdu, offset, "ddqqqqqqd", + f_type, f_bsize, f_blocks, f_bfree, + f_bavail, f_files, f_ffree, + fsid_val, f_namelen); +} + +static void v9fs_statfs(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + int32_t fid; + ssize_t retval = 0; + size_t offset = 7; + V9fsFidState *fidp; + struct statfs stbuf; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + retval = pdu_unmarshal(pdu, offset, "d", &fid); + if (retval < 0) { + goto out_nofid; + } + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + retval = -ENOENT; + goto out_nofid; + } + retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf); + if (retval < 0) { + goto out; + } + retval = v9fs_fill_statfs(s, pdu, &stbuf); + if (retval < 0) { + goto out; + } + retval += offset; +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, retval); + return; +} + +static void v9fs_mknod(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + + int mode; + gid_t gid; + int32_t fid; + V9fsQID qid; + int err = 0; + int major, minor; + size_t offset 
= 7; + V9fsString name; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode, + &major, &minor, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } +#ifndef CONFIG_WIN32 + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid, + makedev(major, minor), mode, &stbuf); +#else + err = -EPERM; +#endif + if (err < 0) { + goto out; + } + stat_to_qid(&stbuf, &qid); + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_mknod_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); + v9fs_string_free(&name); +} + +/* + * Implement posix byte range locking code + * Server side handling of locking code is very simple, because 9p server in + * QEMU can handle only one client. And most of the lock handling + * (like conflict, merging) etc is done by the VFS layer itself, so no need to + * do any thing in * qemu 9p server side lock code path. 
* So when a TLOCK request comes, always return success
 */
static void v9fs_lock(void *opaque)
{
    TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
    int8_t status;
    V9fsFlock flock;
    size_t offset = 7;
    struct stat stbuf;
    V9fsFidState *fidp;
    int32_t fid, err = 0;
    V9fsPDU *pdu = opaque;
    V9fsState *s = pdu->s;

    /* Default to error; only flipped to SUCCESS once the fid checks out. */
    status = P9_LOCK_ERROR;
    v9fs_string_init(&flock.client_id);
    err = pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock.type,
                        &flock.flags, &flock.start, &flock.length,
                        &flock.proc_id, &flock.client_id);
    if (err < 0) {
        goto out_nofid;
    }
    trace_v9fs_lock(pdu->tag, pdu->id, fid,
                    flock.type, flock.start, flock.length);


    /* We support only block flag now (that too ignored currently) */
    if (flock.flags & ~P9_LOCK_FLAGS_BLOCK) {
        err = -EINVAL;
        goto out_nofid;
    }
    fidp = get_fid(pdu, fid);
    if (fidp == NULL) {
        err = -ENOENT;
        goto out_nofid;
    }
    /*
     * fstat only validates that the fid refers to an open file; its
     * result (stbuf) is otherwise unused.
     */
    err = v9fs_co_fstat(pdu, fidp, &stbuf);
    if (err < 0) {
        goto out;
    }
    status = P9_LOCK_SUCCESS;
out:
    put_fid(pdu, fidp);
out_nofid:
    /*
     * The RLOCK reply always carries the status byte, even on the error
     * paths above; on marshal success add the 7-byte header offset.
     */
    err = pdu_marshal(pdu, offset, "b", status);
    if (err > 0) {
        err += offset;
    }
    trace_v9fs_lock_return(pdu->tag, pdu->id, status);
    complete_pdu(s, pdu, err);
    v9fs_string_free(&flock.client_id);
}

/*
 * When a TGETLOCK request comes, always return success because all lock
 * handling is done by client's VFS layer.
+ */ +static void v9fs_getlock(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsGetlock glock; + int32_t fid, err = 0; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&glock.client_id); + err = pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock.type, + &glock.start, &glock.length, &glock.proc_id, + &glock.client_id); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_getlock(pdu->tag, pdu->id, fid, + glock.type, glock.start, glock.length); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_fstat(pdu, fidp, &stbuf); + if (err < 0) { + goto out; + } + glock.type = P9_LOCK_TYPE_UNLCK; + err = pdu_marshal(pdu, offset, "bqqds", glock.type, + glock.start, glock.length, glock.proc_id, + &glock.client_id); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start, + glock.length, glock.proc_id); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(s, pdu, err); + v9fs_string_free(&glock.client_id); +} + +static void v9fs_mkdir(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsPDU *pdu = opaque; + size_t offset = 7; + int32_t fid; + struct stat stbuf; + V9fsQID qid; + V9fsString name; + V9fsFidState *fidp; + gid_t gid; + int mode; + int err = 0; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf); + if (err < 0) { + goto out; + } + stat_to_qid(&stbuf, &qid); + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_mkdir_return(pdu->tag, pdu->id, + qid.type, 
qid.version, qid.path, err); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(pdu->s, pdu, err); + v9fs_string_free(&name); +} + +#ifndef CONFIG_WIN32 +static void v9fs_xattrwalk(void *opaque) +{ + int64_t size; + V9fsString name; + ssize_t err = 0; + size_t offset = 7; + int32_t fid, newfid; + V9fsFidState *file_fidp; + V9fsFidState *xattr_fidp = NULL; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data); + + file_fidp = get_fid(pdu, fid); + if (file_fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + xattr_fidp = alloc_fid(s, newfid); + if (xattr_fidp == NULL) { + err = -EINVAL; + goto out; + } + v9fs_path_copy(&xattr_fidp->path, &file_fidp->path); + if (name.data == NULL) { + /* + * listxattr request. Get the size first + */ + size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0); + if (size < 0) { + err = size; + clunk_fid(s, xattr_fidp->fid); + goto out; + } + /* + * Read the xattr value + */ + xattr_fidp->fs.xattr.len = size; + xattr_fidp->fid_type = P9_FID_XATTR; + xattr_fidp->fs.xattr.copied_len = -1; + if (size) { + xattr_fidp->fs.xattr.value = g_malloc(size); + err = v9fs_co_llistxattr(pdu, &xattr_fidp->path, + xattr_fidp->fs.xattr.value, + xattr_fidp->fs.xattr.len); + if (err < 0) { + clunk_fid(s, xattr_fidp->fid); + goto out; + } + } + err = pdu_marshal(pdu, offset, "q", size); + if (err < 0) { + goto out; + } + err += offset; + } else { + /* + * specific xattr fid. 
We check for xattr + * presence also collect the xattr size + */ + size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, + &name, NULL, 0); + if (size < 0) { + err = size; + clunk_fid(s, xattr_fidp->fid); + goto out; + } + /* + * Read the xattr value + */ + xattr_fidp->fs.xattr.len = size; + xattr_fidp->fid_type = P9_FID_XATTR; + xattr_fidp->fs.xattr.copied_len = -1; + if (size) { + xattr_fidp->fs.xattr.value = g_malloc(size); + err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, + &name, xattr_fidp->fs.xattr.value, + xattr_fidp->fs.xattr.len); + if (err < 0) { + clunk_fid(s, xattr_fidp->fid); + goto out; + } + } + err = pdu_marshal(pdu, offset, "q", size); + if (err < 0) { + goto out; + } + err += offset; + } + trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); +out: + put_fid(pdu, file_fidp); + if (xattr_fidp) { + put_fid(pdu, xattr_fidp); + } +out_nofid: + complete_pdu(s, pdu, err); + v9fs_string_free(&name); +} + +static void v9fs_xattrcreate(void *opaque) +{ + int flags; + int32_t fid; + int64_t size; + ssize_t err = 0; + V9fsString name; + size_t offset = 7; + V9fsFidState *file_fidp; + V9fsFidState *xattr_fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags); + + file_fidp = get_fid(pdu, fid); + if (file_fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* Make the file fid point to xattr */ + xattr_fidp = file_fidp; + xattr_fidp->fid_type = P9_FID_XATTR; + xattr_fidp->fs.xattr.copied_len = 0; + xattr_fidp->fs.xattr.len = size; + xattr_fidp->fs.xattr.flags = flags; + v9fs_string_init(&xattr_fidp->fs.xattr.name); + v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name); + xattr_fidp->fs.xattr.value = g_malloc(size); + err = offset; + put_fid(pdu, file_fidp); +out_nofid: + complete_pdu(s, pdu, err); + v9fs_string_free(&name); +} +#endif + +static void 
v9fs_readlink(void *opaque) +{ + TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__); + V9fsPDU *pdu = opaque; + size_t offset = 7; + V9fsString target; + int32_t fid; + int err = 0; + V9fsFidState *fidp; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_readlink(pdu->tag, pdu->id, fid); + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + + v9fs_string_init(&target); + err = v9fs_co_readlink(pdu, &fidp->path, &target); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "s", &target); + if (err < 0) { + v9fs_string_free(&target); + goto out; + } + err += offset; + trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data); + v9fs_string_free(&target); +out: + put_fid(pdu, fidp); +out_nofid: + complete_pdu(pdu->s, pdu, err); +} + +static CoroutineEntry *pdu_co_handlers[] = { + [P9_TREADDIR] = v9fs_readdir, + [P9_TSTATFS] = v9fs_statfs, + [P9_TGETATTR] = v9fs_getattr, + [P9_TSETATTR] = v9fs_setattr, +#ifndef CONFIG_WIN32 + [P9_TXATTRWALK] = v9fs_xattrwalk, + [P9_TXATTRCREATE] = v9fs_xattrcreate, +#endif + [P9_TMKNOD] = v9fs_mknod, + [P9_TRENAME] = v9fs_rename, + [P9_TLOCK] = v9fs_lock, + [P9_TGETLOCK] = v9fs_getlock, + [P9_TRENAMEAT] = v9fs_renameat, + [P9_TREADLINK] = v9fs_readlink, + [P9_TUNLINKAT] = v9fs_unlinkat, + [P9_TMKDIR] = v9fs_mkdir, + [P9_TVERSION] = v9fs_version, + [P9_TLOPEN] = v9fs_open, + [P9_TATTACH] = v9fs_attach, + [P9_TSTAT] = v9fs_stat, + [P9_TWALK] = v9fs_walk, + [P9_TCLUNK] = v9fs_clunk, + [P9_TFSYNC] = v9fs_fsync, + [P9_TOPEN] = v9fs_open, + [P9_TREAD] = v9fs_read, +#if 0 + [P9_TAUTH] = v9fs_auth, +#endif + [P9_TFLUSH] = v9fs_flush, + [P9_TLINK] = v9fs_link, + [P9_TSYMLINK] = v9fs_symlink, + [P9_TCREATE] = v9fs_create, + [P9_TLCREATE] = v9fs_lcreate, + [P9_TWRITE] = v9fs_write, + [P9_TWSTAT] = v9fs_wstat, + [P9_TREMOVE] = v9fs_remove, +}; + +static void v9fs_op_not_supp(void *opaque) +{ + WARN("[%d][%s] >> This operation is not supported.\n", 
__LINE__, __func__); + V9fsPDU *pdu = opaque; + complete_pdu(pdu->s, pdu, -EOPNOTSUPP); +} + +static void v9fs_fs_ro(void *opaque) +{ + WARN("[%d][%s] >> This is the read-only operation.\n", __LINE__, __func__); + V9fsPDU *pdu = opaque; + complete_pdu(pdu->s, pdu, -EROFS); +} + +static inline bool is_read_only_op(V9fsPDU *pdu) +{ + switch (pdu->id) { + case P9_TREADDIR: + case P9_TSTATFS: + case P9_TGETATTR: + case P9_TXATTRWALK: + case P9_TLOCK: + case P9_TGETLOCK: + case P9_TREADLINK: + case P9_TVERSION: + case P9_TLOPEN: + case P9_TATTACH: + case P9_TSTAT: + case P9_TWALK: + case P9_TCLUNK: + case P9_TFSYNC: + case P9_TOPEN: + case P9_TREAD: + case P9_TAUTH: + case P9_TFLUSH: + return 1; + default: + return 0; + } +} + +static void submit_pdu(V9fsState *s, V9fsPDU *pdu) +{ + Coroutine *co; + CoroutineEntry *handler; + + if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) || + (pdu_co_handlers[pdu->id] == NULL)) { + handler = v9fs_op_not_supp; + } else { + handler = pdu_co_handlers[pdu->id]; + } + + if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) { + handler = v9fs_fs_ro; + } + co = qemu_coroutine_create(handler); + qemu_coroutine_enter(co, pdu); +} + +void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) +{ + V9fsState *s = (V9fsState *)vdev; + V9fsPDU *pdu; + ssize_t len; + + while ((pdu = alloc_pdu(s)) && + (len = virtqueue_pop(vq, &pdu->elem)) != 0) { + uint8_t *ptr; + pdu->s = s; + BUG_ON(pdu->elem.out_num == 0 || pdu->elem.in_num == 0); + BUG_ON(pdu->elem.out_sg[0].iov_len < 7); + + ptr = pdu->elem.out_sg[0].iov_base; + + pdu->size = le32_to_cpu(*(uint32_t *)ptr); + pdu->id = ptr[4]; + pdu->tag = le16_to_cpu(*(uint16_t *)(ptr + 5)); + qemu_co_queue_init(&pdu->complete); + submit_pdu(s, pdu); + } + free_pdu(s, pdu); +} + +static void __attribute__((__constructor__)) virtio_9p_set_fd_limit(void) +{ +#ifndef CONFIG_WIN32 + struct rlimit rlim; + if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) { + fprintf(stderr, "Failed to get the resource limit\n"); + exit(1); + } + 
open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur/3); + open_fd_rc = rlim.rlim_cur/2; +#else + open_fd_hw = 3696; // 4096 - 400 + open_fd_rc = 2048; // 4096 / 2 +#endif +} diff --cc hw/Makefile.objs index a6ac525e0e,52a1464051..a43a06a215 --- a/hw/Makefile.objs +++ b/hw/Makefile.objs @@@ -29,8 -29,7 +29,9 @@@ devices-dirs-$(CONFIG_SOFTMMU) += usb devices-dirs-$(CONFIG_VIRTIO) += virtio/ devices-dirs-$(CONFIG_SOFTMMU) += watchdog/ devices-dirs-$(CONFIG_SOFTMMU) += xen/ + devices-dirs-$(CONFIG_MEM_HOTPLUG) += mem/ +devices-dirs-$(CONFIG_BUILD_VIGS) += yagl/ +devices-dirs-$(CONFIG_BUILD_VIGS) += vigs/ devices-dirs-y += core/ common-obj-y += $(devices-dirs-y) obj-y += $(devices-dirs-y) diff --cc hw/acpi/piix4.c index 9bfea6c5f6,481a16c60a..9842caaf65 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@@ -34,11 -33,11 +34,15 @@@ #include "hw/acpi/pcihp.h" #include "hw/acpi/cpu_hotplug.h" #include "hw/hotplug.h" + #include "hw/mem/pc-dimm.h" + #include "hw/acpi/memory_hotplug.h" + #include "hw/acpi/acpi_dev_interface.h" + #include "hw/xen/xen.h" +#ifdef CONFIG_MARU +#include "tizen/src/hw/maru_pm.h" +#endif + //#define DEBUG #ifdef DEBUG diff --cc hw/i386/pc.c index d6628d7f60,f31d55e7ea..6e83a17d30 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@@ -58,10 -58,11 +58,14 @@@ #include "hw/boards.h" #include "hw/pci/pci_host.h" #include "acpi-build.h" + #include "hw/mem/pc-dimm.h" + #include "trace.h" + #include "qapi/visitor.h" + #include "qapi-visit.h" +#ifdef CONFIG_MARU +#include "tizen/src/util/maru_err_table.h" +#endif /* debug PC/ISA interrupts */ //#define DEBUG_IRQ diff --cc hw/i386/pc_piix.c index 89636909e9,85ed3c8785..d6d09e146e --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@@ -257,32 -303,59 +303,68 @@@ static void pc_init1(MachineState *mach } } - static void pc_init_pci(QEMUMachineInitArgs *args) + static void pc_init_pci(MachineState *machine) { - pc_init1(args, 1, 1); + pc_init1(machine, 1, 1); } +#ifdef CONFIG_MARU - void maru_pc_init_pci(QEMUMachineInitArgs *args); 
++void maru_pc_init_pci(MachineState *machine); + - void maru_pc_init_pci(QEMUMachineInitArgs *args) ++void maru_pc_init_pci(MachineState *machine) +{ - pc_init_pci(args); ++ pc_init_pci(machine); +} +#endif + - static void pc_compat_1_7(QEMUMachineInitArgs *args) + static void pc_compat_2_1(MachineState *machine) + { + PCMachineState *pcms = PC_MACHINE(machine); + smbios_uuid_encoded = false; + x86_cpu_compat_set_features("coreduo", FEAT_1_ECX, CPUID_EXT_VMX, 0); + x86_cpu_compat_set_features("core2duo", FEAT_1_ECX, CPUID_EXT_VMX, 0); + x86_cpu_compat_kvm_no_autodisable(FEAT_8000_0001_ECX, CPUID_EXT3_SVM); + pcms->enforce_aligned_dimm = false; + } + + static void pc_compat_2_0(MachineState *machine) + { + pc_compat_2_1(machine); + /* This value depends on the actual DSDT and SSDT compiled into + * the source QEMU; unfortunately it depends on the binary and + * not on the machine type, so we cannot make pc-i440fx-1.7 work on + * both QEMU 1.7 and QEMU 2.0. + * + * Large variations cause migration to fail for more than one + * consecutive value of the "-smp" maxcpus option. + * + * For small variations of the kind caused by different iasl versions, + * the 4k rounding usually leaves slack. However, there could be still + * one or two values that break. For QEMU 1.7 and QEMU 2.0 the + * slack is only ~10 bytes before one "-smp maxcpus" value breaks! + * + * 6652 is valid for QEMU 2.0, the right value for pc-i440fx-1.7 on + * QEMU 1.7 it is 6414. For RHEL/CentOS 7.0 it is 6418. 
+ */ + legacy_acpi_table_size = 6652; + smbios_legacy_mode = true; + has_reserved_memory = false; + pc_set_legacy_acpi_data_size(); + } + + static void pc_compat_1_7(MachineState *machine) { - smbios_type1_defaults = false; + pc_compat_2_0(machine); + smbios_defaults = false; gigabyte_align = false; option_rom_has_mr = true; - x86_cpu_compat_disable_kvm_features(FEAT_1_ECX, CPUID_EXT_X2APIC); + legacy_acpi_table_size = 6414; + x86_cpu_compat_kvm_no_autoenable(FEAT_1_ECX, CPUID_EXT_X2APIC); } - static void pc_compat_1_6(QEMUMachineInitArgs *args) + static void pc_compat_1_6(MachineState *machine) { - pc_compat_1_7(args); - has_pci_info = false; + pc_compat_1_7(machine); rom_file_has_mr = false; has_acpi_build = false; } diff --cc hw/input/ps2.c index 8d1731cf79,a466e250a4..dff3c82055 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@@ -24,13 -24,9 +24,14 @@@ #include "hw/hw.h" #include "hw/input/ps2.h" #include "ui/console.h" + #include "ui/input.h" #include "sysemu/sysemu.h" +#ifdef CONFIG_MARU +/* to guarantee safe serialization of input event by Munkyu Im */ +#include "qemu/thread.h" +static QemuMutex mutex; +#endif /* debug PC keyboard */ //#define DEBUG_KBD @@@ -366,32 -384,46 +389,53 @@@ static void ps2_mouse_event(DeviceStat if (!(s->mouse_status & MOUSE_STATUS_ENABLED)) return; - s->mouse_dx += dx; - s->mouse_dy -= dy; - s->mouse_dz += dz; - /* XXX: SDL sometimes generates nul events: we delete them */ - if (s->mouse_dx == 0 && s->mouse_dy == 0 && s->mouse_dz == 0 && - s->mouse_buttons == buttons_state) - return; - s->mouse_buttons = buttons_state; + switch (evt->kind) { + case INPUT_EVENT_KIND_REL: + if (evt->rel->axis == INPUT_AXIS_X) { + s->mouse_dx += evt->rel->value; + } else if (evt->rel->axis == INPUT_AXIS_Y) { + s->mouse_dy -= evt->rel->value; + } + break; - if (buttons_state) { - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); + case INPUT_EVENT_KIND_BTN: + if (evt->btn->down) { + s->mouse_buttons |= bmap[evt->btn->button]; + if (evt->btn->button == 
INPUT_BUTTON_WHEEL_UP) { + s->mouse_dz--; + } else if (evt->btn->button == INPUT_BUTTON_WHEEL_DOWN) { + s->mouse_dz++; + } + } else { + s->mouse_buttons &= ~bmap[evt->btn->button]; + } + break; + + default: + /* keep gcc happy */ + break; } + } - if (!(s->mouse_status & MOUSE_STATUS_REMOTE) && - (s->common.queue.count < (PS2_QUEUE_SIZE - 16))) { - for(;;) { + static void ps2_mouse_sync(DeviceState *dev) + { + PS2MouseState *s = (PS2MouseState *)dev; + + if (s->mouse_buttons) { + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); + } + if (!(s->mouse_status & MOUSE_STATUS_REMOTE)) { + while (s->common.queue.count < PS2_QUEUE_SIZE - 4) { /* if not remote, send event. Multiple events are sent if too big deltas */ +#ifdef CONFIG_MARU +/* to guarantee safe serialization of input event by Munkyu Im */ + qemu_mutex_lock(&mutex); + ps2_mouse_send_packet(s); + qemu_mutex_unlock(&mutex); +#else ps2_mouse_send_packet(s); +#endif if (s->mouse_dx == 0 && s->mouse_dy == 0 && s->mouse_dz == 0) break; } @@@ -682,11 -787,8 +799,12 @@@ void *ps2_mouse_init(void (*update_irq) s->common.update_irq = update_irq; s->common.update_arg = update_arg; vmstate_register(NULL, 0, &vmstate_ps2_mouse, s); - qemu_add_mouse_event_handler(ps2_mouse_event, s, 0, "QEMU PS/2 Mouse"); + qemu_input_handler_register((DeviceState *)s, + &ps2_mouse_handler); qemu_register_reset(ps2_mouse_reset, s); +#ifdef CONFIG_MARU + /* to guarantee safe serialization of input event by Munkyu Im */ + qemu_mutex_init(&mutex); +#endif return s; } diff --cc hw/vigs/vigs_device.c index c3185da198,0000000000..562704bfdc mode 100644,000000..100644 --- a/hw/vigs/vigs_device.c +++ b/hw/vigs/vigs_device.c @@@ -1,546 -1,0 +1,538 @@@ +/* + * vigs + * + * Copyright (c) 2000 - 2013 Samsung Electronics Co., Ltd. All rights reserved. 
+ * + * Contact: + * Stanislav Vorobiov + * Jinhyung Jo + * YeongKyoon Lee + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contributors: + * - S-Core Co., Ltd + * + */ + +#include "vigs_log.h" +#include "vigs_server.h" +#include "vigs_backend.h" +#include "vigs_regs.h" +#include "vigs_fenceman.h" +#include "display.h" +#include "work_queue.h" +#include "winsys.h" +#include "hw/hw.h" +#include "hw/pci/pci.h" +#include "ui/console.h" +#include "qemu/main-loop.h" + +#define PCI_VENDOR_ID_VIGS 0x19B2 +#define PCI_DEVICE_ID_VIGS 0x1011 + +#define VIGS_IO_SIZE 0x1000 + +#ifndef CONFIG_USE_SHM +#define VIGS_EXTRA_INVALIDATION (9) +#else +#define VIGS_EXTRA_INVALIDATION (0) +#endif + +typedef struct VIGSState +{ + PCIDevice dev; + + char *display; + char *render_queue; + char *backend; + char *wsi; + MemoryRegion vram_bar; + uint32_t vram_size; + + MemoryRegion ram_bar; + uint32_t ram_size; + + MemoryRegion io_bar; + + struct vigs_fenceman *fenceman; + + QEMUBH *fence_ack_bh; + + struct vigs_server *server; + + /* + * Our console. 
+ */ + QemuConsole *con; + int invalidate_cnt; + + uint32_t reg_con; + uint32_t reg_int; +} VIGSState; + +#define TYPE_VIGS_DEVICE "vigs" + +static void vigs_update_irq(VIGSState *s) +{ + bool raise = false; + + if ((s->reg_con & VIGS_REG_CON_VBLANK_ENABLE) && + (s->reg_int & VIGS_REG_INT_VBLANK_PENDING)) { + raise = true; + } + + if (s->reg_int & VIGS_REG_INT_FENCE_ACK_PENDING) { + raise = true; + } + + if (raise) { + pci_set_irq(&s->dev, 1); + } else { + pci_set_irq(&s->dev, 0); + } +} + +static void vigs_fence_ack_bh(void *opaque) +{ + VIGSState *s = opaque; + + if (vigs_fenceman_pending(s->fenceman)) { + s->reg_int |= VIGS_REG_INT_FENCE_ACK_PENDING; + } + + vigs_update_irq(s); +} + +static void vigs_hw_update(void *opaque) +{ + VIGSState *s = opaque; + DisplaySurface *ds = qemu_console_surface(s->con); + + if (!surface_data(ds)) { + return; + } + + if (vigs_server_update_display(s->server, s->invalidate_cnt)) { + /* + * 'vigs_server_update_display' could have updated the surface, + * so fetch it again. 
+ */ + ds = qemu_console_surface(s->con); + + dpy_gfx_update(s->con, 0, 0, surface_width(ds), surface_height(ds)); + } + + if (s->invalidate_cnt > 0) { + s->invalidate_cnt--; + } + + if (s->reg_con & VIGS_REG_CON_VBLANK_ENABLE) { + s->reg_int |= VIGS_REG_INT_VBLANK_PENDING; + vigs_update_irq(s); + } +} + +static void vigs_hw_invalidate(void *opaque) +{ + VIGSState *s = opaque; + + s->invalidate_cnt = 1 + VIGS_EXTRA_INVALIDATION; +} + +static void vigs_dpy_resize(void *user_data, + uint32_t width, + uint32_t height) +{ + VIGSState *s = user_data; + DisplaySurface *ds = qemu_console_surface(s->con); + + if ((width != surface_width(ds)) || + (height != surface_height(ds))) { + qemu_console_resize(s->con, width, height); + } +} + +static uint32_t vigs_dpy_get_stride(void *user_data) +{ + VIGSState *s = user_data; + DisplaySurface *ds = qemu_console_surface(s->con); + + return surface_stride(ds); +} + +static uint32_t vigs_dpy_get_bpp(void *user_data) +{ + VIGSState *s = user_data; + DisplaySurface *ds = qemu_console_surface(s->con); + + return surface_bytes_per_pixel(ds); +} + +static uint8_t *vigs_dpy_get_data(void *user_data) +{ + VIGSState *s = user_data; + DisplaySurface *ds = qemu_console_surface(s->con); + + return surface_data(ds); +} + +static void vigs_fence_ack(void *user_data, + uint32_t fence_seq) +{ + VIGSState *s = user_data; + + vigs_fenceman_ack(s->fenceman, fence_seq); + + qemu_bh_schedule(s->fence_ack_bh); +} + +static uint64_t vigs_io_read(void *opaque, hwaddr offset, + unsigned size) +{ + VIGSState *s = opaque; + + switch (offset) { + case VIGS_REG_CON: + return s->reg_con; + case VIGS_REG_INT: + return s->reg_int; + case VIGS_REG_FENCE_LOWER: + return vigs_fenceman_get_lower(s->fenceman); + case VIGS_REG_FENCE_UPPER: + return vigs_fenceman_get_upper(s->fenceman); + default: + VIGS_LOG_CRITICAL("Bad register 0x%X read", (uint32_t)offset); + break; + } + + return 0; +} + +static void vigs_io_write(void *opaque, hwaddr offset, + uint64_t value, 
unsigned size) +{ + VIGSState *s = opaque; + + switch (offset) { + case VIGS_REG_EXEC: + vigs_server_dispatch(s->server, value); + break; + case VIGS_REG_CON: + if (((s->reg_con & VIGS_REG_CON_VBLANK_ENABLE) == 0) && + (value & VIGS_REG_CON_VBLANK_ENABLE)) { + VIGS_LOG_DEBUG("VBLANK On"); + } else if (((value & VIGS_REG_CON_VBLANK_ENABLE) == 0) && + (s->reg_con & VIGS_REG_CON_VBLANK_ENABLE)) { + VIGS_LOG_DEBUG("VBLANK Off"); + } + + s->reg_con = value & VIGS_REG_CON_MASK; + + vigs_update_irq(s); + break; + case VIGS_REG_INT: + if (value & VIGS_REG_INT_VBLANK_PENDING) { + value &= ~VIGS_REG_INT_VBLANK_PENDING; + } else { + value |= (s->reg_int & VIGS_REG_INT_VBLANK_PENDING); + } + + if (value & VIGS_REG_INT_FENCE_ACK_PENDING) { + value &= ~VIGS_REG_INT_FENCE_ACK_PENDING; + } else { + value |= (s->reg_int & VIGS_REG_INT_FENCE_ACK_PENDING); + } + + s->reg_int = value & VIGS_REG_INT_MASK; + + vigs_update_irq(s); + break; + default: + VIGS_LOG_CRITICAL("Bad register 0x%X write", (uint32_t)offset); + break; + } +} + +static struct GraphicHwOps vigs_hw_ops = +{ + .invalidate = vigs_hw_invalidate, + .gfx_update = vigs_hw_update +}; + +static const MemoryRegionOps vigs_io_ops = +{ + .read = vigs_io_read, + .write = vigs_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static struct vigs_display_ops vigs_dpy_ops = +{ + .resize = vigs_dpy_resize, + .get_stride = vigs_dpy_get_stride, + .get_bpp = vigs_dpy_get_bpp, + .get_data = vigs_dpy_get_data, + .fence_ack = vigs_fence_ack, +}; + +static int vigs_device_init(PCIDevice *dev) +{ + VIGSState *s = DO_UPCAST(VIGSState, dev, dev); + DisplayObject *dobj = NULL; + WorkQueueObject *wqobj = NULL; + WSIObject *wsiobj = NULL; + struct vigs_backend *backend = NULL; + + if (s->display) { + dobj = displayobject_find(s->display); + + if (!dobj) { + error_report("display '%s' not found", s->display); + return -1; + } + } else { + bool ambiguous; + + dobj = displayobject_create(&ambiguous); + + if (ambiguous) { + 
error_report("ambiguous display, set 'display' property"); + return -1; + } + + if (!dobj) { + error_report("unable to create display"); + return -1; + } + } + + if (s->render_queue) { + wqobj = workqueueobject_find(s->render_queue); + + if (!wqobj) { + error_report("work queue '%s' not found", s->render_queue); + return -1; + } + } else { + bool ambiguous; + + wqobj = workqueueobject_create(&ambiguous); + + if (ambiguous) { + error_report("ambiguous work queue, set 'render_queue' property"); + return -1; + } + + if (!wqobj) { + error_report("unable to create work queue"); + return -1; + } + } + + if (!s->backend) { + error_report("'backend' property not set"); + return -1; + } + + if (strcmp(s->backend, "gl") && strcmp(s->backend, "sw")) { + error_report("backend '%s' not found", s->backend); + return -1; + } + + if (s->wsi) { + Error *err = NULL; + + wsiobj = WSIOBJECT(object_new(TYPE_WSIOBJECT)); + + object_property_add_child(container_get(object_get_root(), "/objects"), + s->wsi, &wsiobj->base, &err); + + object_unref(&wsiobj->base); + + if (err) { + qerror_report_err(err); + error_free(err); + return -1; + } + } + + vigs_log_init(); + + if (s->vram_size < 16 * 1024 * 1024) { + VIGS_LOG_WARN("\"vram_size\" is too small, defaulting to 16mb"); + s->vram_size = 16 * 1024 * 1024; + } + + if (s->ram_size < 1 * 1024 * 1024) { + VIGS_LOG_WARN("\"ram_size\" is too small, defaulting to 1mb"); + s->ram_size = 1 * 1024 * 1024; + } + + pci_config_set_interrupt_pin(dev->config, 1); + + memory_region_init_ram(&s->vram_bar, OBJECT(s), + TYPE_VIGS_DEVICE ".vram", - s->vram_size); ++ s->vram_size, &error_abort); + + memory_region_init_ram(&s->ram_bar, OBJECT(s), + TYPE_VIGS_DEVICE ".ram", - s->ram_size); ++ s->ram_size, &error_abort); + + memory_region_init_io(&s->io_bar, OBJECT(s), + &vigs_io_ops, + s, + TYPE_VIGS_DEVICE ".io", + VIGS_IO_SIZE); + + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->vram_bar); + pci_register_bar(&s->dev, 1, 
PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_bar); + pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_bar); + + if (!strcmp(s->backend, "gl")) { + backend = vigs_gl_backend_create(dobj->dpy); + } else if (!strcmp(s->backend, "sw")) { + backend = vigs_sw_backend_create(); + } + + if (!backend) { + goto fail; + } + + s->fenceman = vigs_fenceman_create(); + + s->fence_ack_bh = qemu_bh_new(vigs_fence_ack_bh, s); + + s->con = graphic_console_init(DEVICE(dev), 0, &vigs_hw_ops, s); + + if (!s->con) { + goto fail; + } + + s->server = vigs_server_create(memory_region_get_ram_ptr(&s->vram_bar), + memory_region_get_ram_ptr(&s->ram_bar), + &vigs_dpy_ops, + s, + backend, + wqobj->wq); + + if (!s->server) { + goto fail; + } + + if (wsiobj) { + wsiobj->wsi = &s->server->wsi; + if (!strcmp(s->backend, "gl")) { + wsiobj->gl_wsi = &s->server->wsi; + } + } + + VIGS_LOG_INFO("VIGS initialized"); + + VIGS_LOG_DEBUG("vram_size = %u", s->vram_size); + VIGS_LOG_DEBUG("ram_size = %u", s->ram_size); + + return 0; + +fail: + if (backend) { + backend->destroy(backend); + } + + if (s->fence_ack_bh) { + qemu_bh_delete(s->fence_ack_bh); + } + + if (s->fenceman) { + vigs_fenceman_destroy(s->fenceman); + } + - memory_region_destroy(&s->io_bar); - memory_region_destroy(&s->ram_bar); - memory_region_destroy(&s->vram_bar); - + vigs_log_cleanup(); + + return -1; +} + +static void vigs_device_reset(DeviceState *d) +{ + VIGSState *s = container_of(d, VIGSState, dev.qdev); + + vigs_server_reset(s->server); + + vigs_fenceman_reset(s->fenceman); + + pci_set_irq(&s->dev, 0); + + s->reg_con = 0; + s->reg_int = 0; + + VIGS_LOG_INFO("VIGS reset"); +} + +static void vigs_device_exit(PCIDevice *dev) +{ + VIGSState *s = DO_UPCAST(VIGSState, dev, dev); + + vigs_server_destroy(s->server); + + qemu_bh_delete(s->fence_ack_bh); + + vigs_fenceman_destroy(s->fenceman); + - memory_region_destroy(&s->io_bar); - memory_region_destroy(&s->ram_bar); - memory_region_destroy(&s->vram_bar); - + VIGS_LOG_INFO("VIGS 
deinitialized"); + + vigs_log_cleanup(); +} + +static Property vigs_properties[] = { + DEFINE_PROP_STRING("display", VIGSState, display), + DEFINE_PROP_STRING("render_queue", VIGSState, render_queue), + DEFINE_PROP_STRING("backend", VIGSState, backend), + DEFINE_PROP_STRING("wsi", VIGSState, wsi), + DEFINE_PROP_UINT32("vram_size", VIGSState, vram_size, + 32 * 1024 * 1024), + DEFINE_PROP_UINT32("ram_size", VIGSState, ram_size, + 1 * 1024 * 1024), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vigs_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->init = vigs_device_init; + k->exit = vigs_device_exit; + k->vendor_id = PCI_VENDOR_ID_VIGS; + k->device_id = PCI_DEVICE_ID_VIGS; + k->class_id = PCI_CLASS_DISPLAY_VGA; + dc->reset = vigs_device_reset; + dc->props = vigs_properties; + dc->desc = "VIGS device"; +} + +static TypeInfo vigs_device_info = +{ + .name = TYPE_VIGS_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(VIGSState), + .class_init = vigs_class_init, +}; + +static void vigs_register_types(void) +{ + type_register_static(&vigs_device_info); +} + +type_init(vigs_register_types) diff --cc hw/yagl/yagl_device.c index de24498883,0000000000..60fbfc8c9f mode 100644,000000..100644 --- a/hw/yagl/yagl_device.c +++ b/hw/yagl/yagl_device.c @@@ -1,435 -1,0 +1,433 @@@ +/* + * yagl + * + * Copyright (c) 2000 - 2013 Samsung Electronics Co., Ltd. All rights reserved. + * + * Contact: + * Stanislav Vorobiov + * Jinhyung Jo + * YeongKyoon Lee + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contributors: + * - S-Core Co., Ltd + * + */ + +#include "yagl_server.h" +#include "yagl_log.h" +#include "yagl_handle_gen.h" +#include "yagl_stats.h" +#include "yagl_process.h" +#include "yagl_thread.h" +#include "yagl_egl_driver.h" +#include "yagl_drivers/gles_ogl/yagl_gles_ogl.h" +#include "yagl_drivers/gles_onscreen/yagl_gles_onscreen.h" +#include "yagl_backends/egl_offscreen/yagl_egl_offscreen.h" +#include "yagl_backends/egl_onscreen/yagl_egl_onscreen.h" +#include "exec/cpu-all.h" +#include "hw/hw.h" +#include "hw/pci/pci.h" +#include "qemu/error-report.h" +#include +#include "hw/vigs/display.h" +#include "hw/vigs/winsys.h" +#include "yagl_gles_driver.h" + +#define PCI_VENDOR_ID_YAGL 0x19B1 +#define PCI_DEVICE_ID_YAGL 0x1010 + +#define YAGL_REG_BUFFPTR 0 +#define YAGL_REG_TRIGGER 4 +#define YAGL_REGS_SIZE 8 + +#define YAGL_MEM_SIZE 0x1000 + +#define YAGL_MAX_USERS (YAGL_MEM_SIZE / YAGL_REGS_SIZE) + +struct yagl_user +{ + bool activated; + yagl_pid process_id; + yagl_tid thread_id; +}; + +typedef struct YaGLState +{ + PCIDevice dev; + + char *display; + char *render_queue; + char *wsi; + MemoryRegion iomem; + struct yagl_server_state *ss; + struct yagl_user users[YAGL_MAX_USERS]; +} YaGLState; + +#define TYPE_YAGL_DEVICE "yagl" + +static void yagl_device_operate(YaGLState *s, int user_index, hwaddr buff_pa) +{ + yagl_pid target_pid; + yagl_tid target_tid; + hwaddr buff_len = TARGET_PAGE_SIZE; + uint8_t *buff = NULL; + + YAGL_LOG_FUNC_ENTER(yagl_device_operate, + "user_index = %d, buff_pa 
= 0x%X", + user_index, + (uint32_t)buff_pa); + + if (!buff_pa && !s->users[user_index].activated) { + YAGL_LOG_CRITICAL("user %d is not activated", user_index); + goto out; + } + + if (buff_pa) { + buff = cpu_physical_memory_map(buff_pa, &buff_len, false); + + if (!buff || (buff_len != TARGET_PAGE_SIZE)) { + YAGL_LOG_CRITICAL("cpu_physical_memory_map(read) failed for user %d, buff_pa = 0x%X", + user_index, + (uint32_t)buff_pa); + goto out; + } + + if (s->users[user_index].activated) { + /* + * Update user. + */ + + yagl_server_dispatch_update(s->ss, + s->users[user_index].process_id, + s->users[user_index].thread_id, + buff); + } else { + /* + * Activate user. + */ + + if (yagl_server_dispatch_init(s->ss, + buff, + &target_pid, + &target_tid)) { + s->users[user_index].activated = true; + s->users[user_index].process_id = target_pid; + s->users[user_index].thread_id = target_tid; + + YAGL_LOG_DEBUG("user %d activated", user_index); + + /* + * The buff is now owned by client. + */ + buff = NULL; + } + } + } else { + /* + * Deactivate user. 
+ */ + + yagl_server_dispatch_exit(s->ss, + s->users[user_index].process_id, + s->users[user_index].thread_id); + + memset(&s->users[user_index], 0, sizeof(s->users[user_index])); + + YAGL_LOG_DEBUG("user %d deactivated", user_index); + } + +out: + if (buff) { + cpu_physical_memory_unmap(buff, + TARGET_PAGE_SIZE, + 0, + TARGET_PAGE_SIZE); + } + + YAGL_LOG_FUNC_EXIT(NULL); +} + +static void yagl_device_trigger(YaGLState *s, int user_index, bool sync) +{ + YAGL_LOG_FUNC_ENTER(yagl_device_trigger, "%d, %d", user_index, sync); + + if (s->users[user_index].activated) { + yagl_server_dispatch_batch(s->ss, + s->users[user_index].process_id, + s->users[user_index].thread_id, + sync); + } else { + YAGL_LOG_CRITICAL("user %d not activated", user_index); + } + + YAGL_LOG_FUNC_EXIT(NULL); +} + +static uint64_t yagl_device_read(void *opaque, hwaddr offset, + unsigned size) +{ + return 0; +} + +static void yagl_device_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + YaGLState *s = (YaGLState*)opaque; + int user_index = (offset / YAGL_REGS_SIZE); + offset -= user_index * YAGL_REGS_SIZE; + + assert(user_index < YAGL_MAX_USERS); + + if (user_index >= YAGL_MAX_USERS) { + YAGL_LOG_CRITICAL("bad user index = %d", user_index); + return; + } + + switch (offset) { + case YAGL_REG_BUFFPTR: + yagl_device_operate(s, user_index, value); + break; + case YAGL_REG_TRIGGER: + yagl_device_trigger(s, user_index, value); + break; + default: + YAGL_LOG_CRITICAL("user %d, bad offset = %d", user_index, offset); + break; + } +} + +static const MemoryRegionOps yagl_device_ops = +{ + .read = yagl_device_read, + .write = yagl_device_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int yagl_device_init(PCIDevice *dev) +{ + YaGLState *s = DO_UPCAST(YaGLState, dev, dev); + DisplayObject *dobj = NULL; + WorkQueueObject *wqobj = NULL; + WSIObject *wsiobj = NULL; + struct yagl_egl_driver *egl_driver = NULL; + struct yagl_egl_backend *egl_backend = NULL; + struct 
yagl_gles_driver *gles_driver = NULL; + + if (s->display) { + dobj = displayobject_find(s->display); + + if (!dobj) { + error_report("display '%s' not found", s->display); + return -1; + } + } else { + bool ambiguous; + + dobj = displayobject_create(&ambiguous); + + if (ambiguous) { + error_report("ambiguous display, set 'display' property"); + return -1; + } + + if (!dobj) { + error_report("unable to create display"); + return -1; + } + } + + if (s->render_queue) { + wqobj = workqueueobject_find(s->render_queue); + + if (!wqobj) { + error_report("work queue '%s' not found", s->render_queue); + return -1; + } + } else { + bool ambiguous; + + wqobj = workqueueobject_create(&ambiguous); + + if (ambiguous) { + error_report("ambiguous work queue, set 'render_queue' property"); + return -1; + } + + if (!wqobj) { + error_report("unable to create work queue"); + return -1; + } + } + + if (s->wsi) { + wsiobj = wsiobject_find(s->wsi); + if (!wsiobj) { + error_report("winsys interface '%s' not found", s->wsi); + return -1; + } + } + + yagl_log_init(); + + YAGL_LOG_FUNC_ENTER(yagl_device_init, NULL); + + memory_region_init_io(&s->iomem, OBJECT(s), + &yagl_device_ops, + s, + TYPE_YAGL_DEVICE, + YAGL_MEM_SIZE); + + yagl_handle_gen_init(); + + egl_driver = yagl_egl_driver_create(dobj->dpy); + + if (!egl_driver) { + goto fail; + } + + gles_driver = yagl_gles_ogl_create(egl_driver->dyn_lib, + egl_driver->gl_version); + + if (!gles_driver) { + goto fail; + } + + if (wsiobj && wsiobj->gl_wsi) { + egl_backend = yagl_egl_onscreen_create(wsiobj->gl_wsi, + egl_driver, + gles_driver); + gles_driver = yagl_gles_onscreen_create(gles_driver); + } else { + egl_backend = yagl_egl_offscreen_create(egl_driver, gles_driver); + } + + if (!egl_backend) { + goto fail; + } + + /* + * Now owned by EGL backend. + */ + egl_driver = NULL; + + s->ss = yagl_server_state_create(egl_backend, gles_driver, + wqobj->wq, + (wsiobj ? wsiobj->gl_wsi : NULL)); + + /* + * Owned/destroyed by server state. 
+ */ + egl_backend = NULL; + gles_driver = NULL; + + if (!s->ss) { + goto fail; + } + + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->iomem); + + YAGL_LOG_FUNC_EXIT(NULL); + + return 0; + +fail: + if (gles_driver) { + gles_driver->destroy(gles_driver); + } + + if (egl_driver) { + egl_driver->destroy(egl_driver); + } + + yagl_handle_gen_cleanup(); + + YAGL_LOG_FUNC_EXIT(NULL); + + yagl_log_cleanup(); + + return -1; +} + +static void yagl_device_reset(DeviceState *d) +{ + YaGLState *s = container_of(d, YaGLState, dev.qdev); + int i; + + YAGL_LOG_FUNC_ENTER(yagl_device_reset, NULL); + + yagl_server_reset(s->ss); + + yagl_handle_gen_reset(); + + for (i = 0; i < YAGL_MAX_USERS; ++i) { + memset(&s->users[i], 0, sizeof(s->users[i])); + } + + YAGL_LOG_FUNC_EXIT(NULL); +} + +static void yagl_device_exit(PCIDevice *dev) +{ + YaGLState *s = DO_UPCAST(YaGLState, dev, dev); + + YAGL_LOG_FUNC_ENTER(yagl_device_exit, NULL); + - memory_region_destroy(&s->iomem); - + yagl_server_state_destroy(s->ss); + + yagl_handle_gen_cleanup(); + + YAGL_LOG_FUNC_EXIT(NULL); + + yagl_log_cleanup(); +} + +static Property yagl_properties[] = { + DEFINE_PROP_STRING("display", YaGLState, display), + DEFINE_PROP_STRING("render_queue", YaGLState, render_queue), + DEFINE_PROP_STRING("wsi", YaGLState, wsi), + DEFINE_PROP_END_OF_LIST(), +}; + +static void yagl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->init = yagl_device_init; + k->exit = yagl_device_exit; + k->vendor_id = PCI_VENDOR_ID_YAGL; + k->device_id = PCI_DEVICE_ID_YAGL; + k->class_id = PCI_CLASS_OTHERS; + dc->reset = yagl_device_reset; + dc->props = yagl_properties; + dc->desc = "YaGL device"; +} + +static TypeInfo yagl_device_info = +{ + .name = TYPE_YAGL_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(YaGLState), + .class_init = yagl_class_init, +}; + +static void yagl_register_types(void) +{ + 
type_register_static(&yagl_device_info); +} + +type_init(yagl_register_types) diff --cc include/sysemu/arch_init.h index 9cad88bcde,54b36c16c4..f2f3dc9f68 --- a/include/sysemu/arch_init.h +++ b/include/sysemu/arch_init.h @@@ -29,12 -30,11 +30,12 @@@ extern const uint32_t arch_type void select_soundhw(const char *optarg); void do_acpitable_option(const QemuOpts *opts); void do_smbios_option(QemuOpts *opts); + void ram_mig_init(void); void cpudef_init(void); void audio_init(void); - int tcg_available(void); int kvm_available(void); int xen_available(void); +int hax_available(void); CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp); diff --cc target-i386/hax-all.c index 7069be91ec,0000000000..62e4ffae35 mode 100644,000000..100644 --- a/target-i386/hax-all.c +++ b/target-i386/hax-all.c @@@ -1,1339 -1,0 +1,1370 @@@ +/* + * QEMU KVM support + * + * Copyright IBM, Corp. 2008 + * Red Hat, Inc. 2008 + * + * Authors: + * Anthony Liguori + * Glauber Costa + * + * Copyright (c) 2011 Intel Corporation + * Written by: + * Jiang Yunhong + * Xin Xiaohui + * Zhang Xiantao + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +/* + * HAX common code for both windows and darwin + * some code from KVM side + */ + +#include "strings.h" +#include "hax-i386.h" +#include "sysemu/kvm.h" +#include "exec/address-spaces.h" +#include "qemu/main-loop.h" + +#define HAX_EMUL_ONE 0x1 +#define HAX_EMUL_REAL 0x2 +#define HAX_EMUL_HLT 0x4 +#define HAX_EMUL_EXITLOOP 0x5 + +#define HAX_EMULATE_STATE_MMIO 0x1 +#define HAX_EMULATE_STATE_REAL 0x2 +#define HAX_EMULATE_STATE_NONE 0x3 +#define HAX_EMULATE_STATE_INITIAL 0x4 + +#define HAX_NON_UG_PLATFORM 0x0 +#define HAX_UG_PLATFORM 0x1 + +static void hax_vcpu_sync_state(CPUArchState *env, int modified); +static int hax_arch_get_registers(CPUArchState *env); +static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, int dir, int size, int cnt, void *buf); +static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft); + +struct hax_state hax_global; + +int ug_support = 0; + +bool hax_allowed; + +/* Called after hax_init_internal */ +int hax_ug_platform(void) +{ + return (ug_support); +} + +/* Currently non-PG modes are emulated by QEMU */ +int hax_vcpu_emulation_mode(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + return !(env->cr[0] & CR0_PG_MASK); +} + +static int hax_prepare_emulation(CPUArchState *env) +{ + /* Flush all emulation states */ + tlb_flush(ENV_GET_CPU(env), 1); + tb_flush(env); + /* Sync the vcpu state from hax kernel module */ + hax_vcpu_sync_state(env, 0); + return 0; +} + +/* + * Check whether to break the translation block loop + * break tbloop after one MMIO emulation, or after finish emulation mode + */ +static int hax_stop_tbloop(CPUArchState *env) +{ + CPUState *cpu = ENV_GET_CPU(env); + switch (cpu->hax_vcpu->emulation_state) + { + case HAX_EMULATE_STATE_MMIO: + if (cpu->hax_vcpu->resync) { + hax_prepare_emulation(env); + cpu->hax_vcpu->resync = 0; + return 0; + } + return 1; + break; + case HAX_EMULATE_STATE_INITIAL: + case HAX_EMULATE_STATE_REAL: + if 
(!hax_vcpu_emulation_mode(cpu)) + return 1; + break; + default: + dprint("Invalid emulation state in hax_sto_tbloop state %x\n", + cpu->hax_vcpu->emulation_state); + break; + } + + return 0; +} + +int hax_stop_emulation(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + + if (hax_stop_tbloop(env)) + { + cpu->hax_vcpu->emulation_state = HAX_EMULATE_STATE_NONE; + /* + * QEMU emulation changes vcpu state, + * Sync the vcpu state to HAX kernel module + */ + hax_vcpu_sync_state(env, 1); + return 1; + } + + return 0; +} + +int hax_stop_translate(CPUState *cpu) +{ + struct hax_vcpu_state *vstate = cpu->hax_vcpu; + + assert(vstate->emulation_state); + if (vstate->emulation_state == HAX_EMULATE_STATE_MMIO ) + return 1; + + return 0; +} + +int valid_hax_tunnel_size(uint16_t size) +{ + return size >= sizeof(struct hax_tunnel); +} + +hax_fd hax_vcpu_get_fd(CPUArchState *env) +{ + struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu; + if (!vcpu) + return HAX_INVALID_FD; + return vcpu->fd; +} + +/* Current version */ +uint32_t hax_cur_version = 0x3; // ver 2.0: support fast mmio +/* Least HAX kernel version */ +uint32_t hax_lest_version = 0x3; + +static int hax_get_capability(struct hax_state *hax) +{ + int ret; + struct hax_capabilityinfo capinfo, *cap = &capinfo; + + ret = hax_capability(hax, cap); + if (ret) + return ret; + + if ( ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) == + HAX_CAP_STATUS_NOTWORKING )) + { + if (cap->winfo & HAX_CAP_FAILREASON_VT) + dprint("VTX feature is not enabled. 
which will cause HAX driver not working.\n"); + else if (cap->winfo & HAX_CAP_FAILREASON_NX) + dprint("NX feature is not enabled, which will cause HAX driver not working.\n"); + return -ENXIO; + } + + if ((cap->winfo & HAX_CAP_UG)) + ug_support = 1; + + if (cap->wstatus & HAX_CAP_MEMQUOTA) + { + if (cap->mem_quota < hax->mem_quota) + { + dprint("The memory needed by this VM exceeds the driver limit.\n"); + return -ENOSPC; + } + } + return 0; +} + +static int hax_version_support(struct hax_state *hax) +{ + int ret; + struct hax_module_version version; + + ret = hax_mod_version(hax, &version); + if (ret < 0) + return 0; + + if ( (hax_lest_version > version.cur_version) || + (hax_cur_version < version.compat_version) ) + return 0; + + return 1; +} + +int hax_vcpu_create(int id) +{ + struct hax_vcpu_state *vcpu = NULL; + int ret; + + if (!hax_global.vm) + { + dprint("vcpu %x created failed, vm is null\n", id); + return -1; + } + + if (hax_global.vm->vcpus[id]) + { + dprint("vcpu %x allocated already\n", id); + return 0; + } + + vcpu = g_malloc(sizeof(struct hax_vcpu_state)); + if (!vcpu) + { + dprint("Failed to alloc vcpu state\n"); + return -ENOMEM; + } + + memset(vcpu, 0, sizeof(struct hax_vcpu_state)); + + ret = hax_host_create_vcpu(hax_global.vm->fd, id); + if (ret) + { + dprint("Failed to create vcpu %x\n", id); + goto error; + } + + vcpu->vcpu_id = id; + vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id); + if (hax_invalid_fd(vcpu->fd)) + { + dprint("Failed to open the vcpu\n"); + ret = -ENODEV; + goto error; + } + + hax_global.vm->vcpus[id] = vcpu; + + ret = hax_host_setup_vcpu_channel(vcpu); + if (ret) + { + dprint("Invalid hax tunnel size \n"); + ret = -EINVAL; + goto error; + } + return 0; + +error: + /* vcpu and tunnel will be closed automatically */ + if (vcpu && !hax_invalid_fd(vcpu->fd)) + hax_close_fd(vcpu->fd); + + hax_global.vm->vcpus[id] = NULL; + g_free(vcpu); + return -1; +} + +int hax_vcpu_destroy(CPUState *cpu) +{ + struct hax_vcpu_state *vcpu = 
cpu->hax_vcpu; + + if (!hax_global.vm) + { + dprint("vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id); + return -1; + } + + if (!vcpu) + return 0; + + /* + * 1. The hax_tunnel is also destroied when vcpu destroy + * 2. close fd will cause hax module vcpu be cleaned + */ + hax_close_fd(vcpu->fd); + hax_global.vm->vcpus[vcpu->vcpu_id] = NULL; + g_free(vcpu); + return 0; +} + +int hax_init_vcpu(CPUState *cpu) +{ + int ret; + + ret = hax_vcpu_create(cpu->cpu_index); + if (ret < 0) + { + dprint("Failed to create HAX vcpu\n"); + exit(-1); + } + + cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index]; + cpu->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL; + cpu->hax_vcpu_dirty = 1; + qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *)(cpu->env_ptr)); + + return ret; +} + +struct hax_vm *hax_vm_create(struct hax_state *hax) +{ + struct hax_vm *vm; + int vm_id = 0, ret; + char *vm_name = NULL; + + if (hax_invalid_fd(hax->fd)) + return NULL; + + if (hax->vm) + return hax->vm; + + vm = g_malloc(sizeof(struct hax_vm)); + if (!vm) + return NULL; + memset(vm, 0, sizeof(struct hax_vm)); + ret = hax_host_create_vm(hax, &vm_id); + if (ret) { + dprint("Failed to create vm %x\n", ret); + goto error; + } + vm->id = vm_id; + vm->fd = hax_host_open_vm(hax, vm_id); + if (hax_invalid_fd(vm->fd)) + { + dprint("Open the vm devcie error:%s\n", vm_name); + goto error; + } + + hax->vm = vm; + dprint("End of VM create, id %d\n", vm->id); + return vm; + +error: + g_free(vm); + hax->vm = NULL; + return NULL; +} + +int hax_vm_destroy(struct hax_vm *vm) +{ + int i; + + for (i = 0; i < HAX_MAX_VCPU; i++) + if (vm->vcpus[i]) + { + dprint("VCPU should be cleaned before vm clean\n"); + return -1; + } + hax_close_fd(vm->fd); + g_free(vm); + hax_global.vm = NULL; + return 0; +} + +static void +hax_region_add(MemoryListener *listener, MemoryRegionSection *section) +{ + hax_set_phys_mem(section); +} + +static void +hax_region_del(MemoryListener *listener, MemoryRegionSection *section) +{ 
+ hax_set_phys_mem(section); +} + + +/* currently we fake the dirty bitmap sync, always dirty */ +// avoid implicit declaration warning on Windows +int ffsl(long value); +static void hax_log_sync(MemoryListener *listener, MemoryRegionSection *section) +{ + MemoryRegion *mr = section->mr; + unsigned long c; + unsigned int len = ((int128_get64(section->size) / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / + HOST_LONG_BITS; + unsigned long bitmap[len]; + int i, j; + + for (i = 0; i < len; i++) { + bitmap[i] = 1; + c = leul_to_cpu(bitmap[i]); + do { + j = ffsl(c) - 1; + c &= ~(1ul << j); + memory_region_set_dirty(mr, (i * HOST_LONG_BITS + j) * + TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); + } while (c != 0); + } +} + +static void hax_log_global_start(struct MemoryListener *listener) +{ +} + +static void hax_log_global_stop(struct MemoryListener *listener) +{ +} + +static void hax_log_start(MemoryListener *listener, + MemoryRegionSection *section) +{ +} + +static void hax_log_stop(MemoryListener *listener, + MemoryRegionSection *section) +{ +} + +static void hax_begin(MemoryListener *listener) +{ +} + +static void hax_commit(MemoryListener *listener) +{ +} + +static void hax_region_nop(MemoryListener *listener, + MemoryRegionSection *section) +{ +} + +static MemoryListener hax_memory_listener = { + .begin = hax_begin, + .commit = hax_commit, + .region_add = hax_region_add, + .region_del = hax_region_del, + .region_nop = hax_region_nop, + .log_start = hax_log_start, + .log_stop = hax_log_stop, + .log_sync = hax_log_sync, + .log_global_start = hax_log_global_start, + .log_global_stop = hax_log_global_stop, +}; + +static void hax_handle_interrupt(CPUState *cpu, int mask) +{ + cpu->interrupt_request |= mask; + + if (!qemu_cpu_is_self(cpu)) { + qemu_cpu_kick(cpu); + } +} + +static int hax_init_internal(void) +{ + struct hax_state *hax = NULL; + struct hax_qemu_version qversion; + int ret; + + hax = &hax_global; + + + hax->fd = hax_mod_open(); + if (hax_invalid_fd(hax->fd)) + { + 
hax->fd = 0; + ret = -ENODEV; + goto error; + } + + ret = hax_get_capability(hax); + + if (ret) + { + if (ret != -ENOSPC) + ret = -EINVAL; + goto error; + } + + if (!hax_version_support(hax)) + { + dprint("Incompat Hax version. Qemu current version %x ", hax_cur_version ); + dprint("requires least HAX version %x\n", hax_lest_version); + ret = -EINVAL; + goto error; + } + + hax->vm = hax_vm_create(hax); + if (!hax->vm) + { + dprint("Failed to create HAX VM\n"); + ret = -EINVAL; + goto error; + } + + memory_listener_register(&hax_memory_listener, &address_space_memory); + + qversion.cur_version = hax_cur_version; + qversion.least_version = hax_lest_version; + hax_notify_qemu_version(hax->vm->fd, &qversion); + cpu_interrupt_handler = hax_handle_interrupt; + + return ret; +error: + if (hax->vm) + hax_vm_destroy(hax->vm); + if (hax->fd) + hax_mod_close(hax); + + return ret; +} + - int hax_accel_init(uint64_t ram_size) ++static int hax_init(MachineState *ms) +{ + struct hax_state *hax = NULL; + int ret = 0; ++ uint64_t ram_size = ++ qemu_opt_get_number(qemu_find_opts_singleton("memory"), "size"); + + hax = &hax_global; + memset(hax, 0, sizeof(struct hax_state)); + hax->mem_quota = ram_size; + dprint("ram_size %llx\n", ram_size); + + ret = hax_init_internal(); + // It is better to fail than to transit implicitly. + /* + if (ret && (ret != -ENOSPC)) { + dprint("No accelerator found.\n"); + } else { + dprint("HAX is %s and emulator runs in %s mode.\n", + !ret ? "working" : "not working", + !ret ? "fast virt" : "emulation"); + } + */ + ++ if (!ret && !hax_ug_platform()) { ++ // TCG is used for very limited purpose under HAX. ++ // so we can allocate small TB buffer. 
++ tcg_exec_init(16 * 1024 * 1024); ++ } ++ + return ret; +} + +static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft) +{ + uint64_t buf = 0; + /* + * With fast MMIO, QEMU need not sync vCPU state with HAXM + * driver because it will only invoke MMIO handler + * However, some MMIO operations utilize virtual address like qemu_pipe + * Thus we need to sync the CR0, CR3 and CR4 so that QEMU + * can translate the guest virtual address to guest physical + * address + */ + env->cr[0] = hft->_cr0; + env->cr[2] = hft->_cr2; + env->cr[3] = hft->_cr3; + env->cr[4] = hft->_cr4; + + buf = hft->value; + + cpu_physical_memory_rw(hft->gpa, (uint8_t *)&buf, hft->size, hft->direction); + if (hft->direction == 0) + hft->value = buf; + + return 0; +} + +static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, int direction, + int size, int count, void *buffer) +{ + uint8_t *ptr; + int i; + + if (!df) + ptr = (uint8_t *)buffer; + else + ptr = buffer + size * count - size; + for (i = 0; i < count; i++) + { + if (direction == HAX_EXIT_IO_IN) { + switch (size) { + case 1: + stb_p(ptr, cpu_inb(port)); + break; + case 2: + stw_p(ptr, cpu_inw(port)); + break; + case 4: + stl_p(ptr, cpu_inl(port)); + break; + } + } else { + switch (size) { + case 1: + cpu_outb(port, ldub_p(ptr)); + break; + case 2: + cpu_outw(port, lduw_p(ptr)); + break; + case 4: + cpu_outl(port, ldl_p(ptr)); + break; + } + } + if (!df) + ptr += size; + else + ptr -= size; + } + + return 0; +} + +static int hax_vcpu_interrupt(CPUArchState *env) +{ + CPUState *cpu = ENV_GET_CPU(env); + struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_tunnel *ht = vcpu->tunnel; + + /* + * Try to inject an interrupt if the guest can accept it + * Unlike KVM, HAX kernel check for the eflags, instead of qemu + */ + if (ht->ready_for_interrupt_injection && + (cpu->interrupt_request & CPU_INTERRUPT_HARD)) + { + int irq; + + irq = cpu_get_pic_interrupt(env); + if (irq >= 0) { + hax_inject_interrupt(env, 
irq); + cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; + } + } + + /* If we have an interrupt but the guest is not ready to receive an + * interrupt, request an interrupt window exit. This will + * cause a return to userspace as soon as the guest is ready to + * receive interrupts. */ + if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) + ht->request_interrupt_window = 1; + else + ht->request_interrupt_window = 0; + return 0; +} + +void hax_raise_event(CPUState *cpu) +{ + struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + + if (!vcpu) + return; + vcpu->tunnel->user_event_pending = 1; +} + +/* + * Ask hax kernel module to run the CPU for us till: + * 1. Guest crash or shutdown + * 2. Need QEMU's emulation like guest execute MMIO instruction or guest + * enter emulation mode (non-PG mode) + * 3. Guest execute HLT + * 4. Qemu have Signal/event pending + * 5. An unknown VMX exit happens + */ +extern void qemu_system_reset_request(void); +static int hax_vcpu_hax_exec(CPUArchState *env, int ug_platform) +{ + int ret = 0; + CPUState *cpu = ENV_GET_CPU(env); + X86CPU *x86_cpu = X86_CPU(cpu); + struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_tunnel *ht = vcpu->tunnel; + + if(!ug_platform) + { + if (hax_vcpu_emulation_mode(cpu)) + { + dprint("Trying to vcpu execute at eip:" TARGET_FMT_lx "\n", env->eip); + return HAX_EMUL_EXITLOOP; + } + + cpu->halted = 0; + + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { + cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; + apic_poll_irq(x86_cpu->apic_state); + } + } + else /* UG platform */ + { + if (!hax_enabled()) + { + dprint("Trying to vcpu execute at eip:" TARGET_FMT_lx "\n", env->eip); + return HAX_EMUL_EXITLOOP; + } + + cpu->halted = 0; + + if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { + cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; + apic_poll_irq(x86_cpu->apic_state); + } + + if (cpu->interrupt_request & CPU_INTERRUPT_INIT) { + fprintf(stderr, "\nUG hax_vcpu_hax_exec: handling INIT for %d \n", cpu->cpu_index); + 
do_cpu_init(x86_cpu); + hax_vcpu_sync_state(env, 1); + } + + if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { + fprintf(stderr, "UG hax_vcpu_hax_exec: handling SIPI for %d \n", cpu->cpu_index); + hax_vcpu_sync_state(env, 0); + do_cpu_sipi(x86_cpu); + hax_vcpu_sync_state(env, 1); + } + } + + //hax_cpu_synchronize_state(cpu); + + do { + int hax_ret; + + if (cpu->exit_request) { + ret = HAX_EMUL_EXITLOOP ; + break; + } + +#if 0 + if (cpu->hax_vcpu_dirty) { + hax_vcpu_sync_state(env, 1); + cpu->hax_vcpu_dirty = 0; + } +#endif + + hax_vcpu_interrupt(env); + if (!ug_platform) + { + hax_ret = hax_vcpu_run(vcpu); + } + else /* UG platform */ + { + qemu_mutex_unlock_iothread(); + hax_ret = hax_vcpu_run(vcpu); + qemu_mutex_lock_iothread(); + current_cpu = cpu; + } + + /* Simply continue the vcpu_run if system call interrupted */ + if (hax_ret == -EINTR || hax_ret == -EAGAIN) { + dprint("io window interrupted\n"); + continue; + } + + if (hax_ret < 0) + { + dprint("vcpu run failed for vcpu %x\n", vcpu->vcpu_id); + abort(); + } + switch (ht->_exit_status) + { + case HAX_EXIT_IO: + { + ret = hax_handle_io(env, ht->pio._df, ht->pio._port, + ht->pio._direction, + ht->pio._size, ht->pio._count, vcpu->iobuf); + } + break; + case HAX_EXIT_MMIO: + ret = HAX_EMUL_ONE; + break; + case HAX_EXIT_FAST_MMIO: + ret = hax_handle_fastmmio(env, + (struct hax_fastmmio *)vcpu->iobuf); + break; + case HAX_EXIT_REAL: + ret = HAX_EMUL_REAL; + break; + /* Guest state changed, currently only for shutdown */ + case HAX_EXIT_STATECHANGE: + dprint("VCPU shutdown request\n"); + qemu_system_reset_request(); + hax_prepare_emulation(env); + cpu_dump_state(cpu, stderr, fprintf, 0); + ret = HAX_EMUL_EXITLOOP; + break; + case HAX_EXIT_UNKNOWN_VMEXIT: + dprint("Unknown VMX exit %x from guest\n", ht->_exit_reason); + qemu_system_reset_request(); + hax_prepare_emulation(env); + cpu_dump_state(cpu, stderr, fprintf, 0); + ret = HAX_EMUL_EXITLOOP; + break; + case HAX_EXIT_HLT: + if (!(cpu->interrupt_request & 
CPU_INTERRUPT_HARD) && + !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + /* hlt instruction with interrupt disabled is shutdown */ + env->eflags |= IF_MASK; + cpu->halted = 1; + cpu->exception_index = EXCP_HLT; + ret = HAX_EMUL_HLT; + } + break; + /* these situation will continue to hax module */ + case HAX_EXIT_INTERRUPT: + case HAX_EXIT_PAUSED: + break; + default: + dprint("Unknow exit %x from hax\n", ht->_exit_status); + qemu_system_reset_request(); + hax_prepare_emulation(env); + cpu_dump_state(cpu, stderr, fprintf, 0); + ret = HAX_EMUL_EXITLOOP; + break; + } + }while (!ret); + + if (cpu->exit_request) { + cpu->exit_request = 0; + cpu->exception_index = EXCP_INTERRUPT; + } + return ret; +} + +#if 0 +static void do_hax_cpu_synchronize_state(void *_env) +{ + CPUArchState *env = _env; + CPUState *cpu = ENV_GET_CPU(env); + if (!cpu->hax_vcpu_dirty) { + hax_vcpu_sync_state(env, 0); + cpu->hax_vcpu_dirty = 1; + } +} + +static void hax_cpu_synchronize_state(CPUState *cpu) +{ + if (!cpu->hax_vcpu_dirty) { + run_on_cpu(cpu, do_hax_cpu_synchronize_state, cpu); + } +} +#endif + +void hax_cpu_synchronize_post_reset(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + hax_vcpu_sync_state(env, 1); + cpu->hax_vcpu_dirty = 0; +} + +void hax_cpu_synchronize_post_init(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + hax_vcpu_sync_state(env, 1); + cpu->hax_vcpu_dirty = 0; +} + +/* + * return 1 when need emulate, 0 when need exit loop + */ +int hax_vcpu_exec(CPUState *cpu) +{ + int next = 0, ret = 0; + struct hax_vcpu_state *vcpu; + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + + if (cpu->hax_vcpu->emulation_state != HAX_EMULATE_STATE_NONE) + return 1; + + vcpu = cpu->hax_vcpu; + next = hax_vcpu_hax_exec(env, HAX_NON_UG_PLATFORM); + switch (next) + { + case HAX_EMUL_ONE: + ret = 1; + vcpu->emulation_state = HAX_EMULATE_STATE_MMIO; + hax_prepare_emulation(env); + break; + case HAX_EMUL_REAL: + ret = 1; + vcpu->emulation_state 
= + HAX_EMULATE_STATE_REAL; + hax_prepare_emulation(env); + break; + case HAX_EMUL_HLT: + case HAX_EMUL_EXITLOOP: + break; + default: + dprint("Unknown hax vcpu exec return %x\n", next); + abort(); + } + + return ret; +} + +int hax_smp_cpu_exec(CPUState *cpu) +{ + CPUArchState *env = (CPUArchState *)(cpu->env_ptr); + int why; + int ret; + + while (1) { + if (cpu->exception_index >= EXCP_INTERRUPT) { + ret = cpu->exception_index; + cpu->exception_index = -1; + break; + } + + why = hax_vcpu_hax_exec(env, HAX_UG_PLATFORM); + + if ((why != HAX_EMUL_HLT) && (why != HAX_EMUL_EXITLOOP)) + { + dprint("Unknown hax vcpu return %x\n", why); + abort(); + } + } + + return ret; +} + +#define HAX_RAM_INFO_ROM 0x1 + +static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs) +{ + memset(lhs, 0, sizeof(struct segment_desc_t )); + lhs->selector = rhs->selector; + lhs->base = rhs->base; + lhs->limit = rhs->limit; + lhs->type = 3; + lhs->present = 1; + lhs->dpl = 3; + lhs->operand_size = 0; + lhs->desc = 1; + lhs->long_mode = 0; + lhs->granularity = 0; + lhs->available = 0; +} + +static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs) +{ + lhs->selector = rhs->selector; + lhs->base = rhs->base; + lhs->limit = rhs->limit; + lhs->flags = + (rhs->type << DESC_TYPE_SHIFT) + | (rhs->present * DESC_P_MASK) + | (rhs->dpl << DESC_DPL_SHIFT) + | (rhs->operand_size << DESC_B_SHIFT) + | (rhs->desc * DESC_S_MASK) + | (rhs->long_mode << DESC_L_SHIFT) + | (rhs->granularity * DESC_G_MASK) + | (rhs->available * DESC_AVL_MASK); +} + +static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs) +{ + unsigned flags = rhs->flags; + + memset(lhs, 0, sizeof(struct segment_desc_t)); + lhs->selector = rhs->selector; + lhs->base = rhs->base; + lhs->limit = rhs->limit; + lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; + lhs->present = (flags & DESC_P_MASK) != 0; + lhs->dpl = rhs->selector & 3; + lhs->operand_size = (flags >> DESC_B_SHIFT) & 1; + lhs->desc = (flags 
& DESC_S_MASK) != 0; + lhs->long_mode = (flags >> DESC_L_SHIFT) & 1; + lhs->granularity = (flags & DESC_G_MASK) != 0; + lhs->available = (flags & DESC_AVL_MASK) != 0; +} + +static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set) +{ + target_ulong reg = *hax_reg; + + if (set) + *hax_reg = *qemu_reg; + else + *qemu_reg = reg; +} + +/* The sregs has been synced with HAX kernel already before this call */ +static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs) +{ + get_seg(&env->segs[R_CS], &sregs->_cs); + get_seg(&env->segs[R_DS], &sregs->_ds); + get_seg(&env->segs[R_ES], &sregs->_es); + get_seg(&env->segs[R_FS], &sregs->_fs); + get_seg(&env->segs[R_GS], &sregs->_gs); + get_seg(&env->segs[R_SS], &sregs->_ss); + + get_seg(&env->tr, &sregs->_tr); + get_seg(&env->ldt, &sregs->_ldt); + env->idt.limit = sregs->_idt.limit; + env->idt.base = sregs->_idt.base; + env->gdt.limit = sregs->_gdt.limit; + env->gdt.base = sregs->_gdt.base; + return 0; +} + +static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs) +{ + if ((env->eflags & VM_MASK)) { + set_v8086_seg(&sregs->_cs, &env->segs[R_CS]); + set_v8086_seg(&sregs->_ds, &env->segs[R_DS]); + set_v8086_seg(&sregs->_es, &env->segs[R_ES]); + set_v8086_seg(&sregs->_fs, &env->segs[R_FS]); + set_v8086_seg(&sregs->_gs, &env->segs[R_GS]); + set_v8086_seg(&sregs->_ss, &env->segs[R_SS]); + } else { + set_seg(&sregs->_cs, &env->segs[R_CS]); + set_seg(&sregs->_ds, &env->segs[R_DS]); + set_seg(&sregs->_es, &env->segs[R_ES]); + set_seg(&sregs->_fs, &env->segs[R_FS]); + set_seg(&sregs->_gs, &env->segs[R_GS]); + set_seg(&sregs->_ss, &env->segs[R_SS]); + + if (env->cr[0] & CR0_PE_MASK) { + /* force ss cpl to cs cpl */ + sregs->_ss.selector = (sregs->_ss.selector & ~3) | + (sregs->_cs.selector & 3); + sregs->_ss.dpl = sregs->_ss.selector & 3; + } + } + + set_seg(&sregs->_tr, &env->tr); + set_seg(&sregs->_ldt, &env->ldt); + sregs->_idt.limit = env->idt.limit; + sregs->_idt.base = 
env->idt.base; + sregs->_gdt.limit = env->gdt.limit; + sregs->_gdt.base = env->gdt.base; + return 0; +} + +/* + * After get the state from the kernel module, some + * qemu emulator state need be updated also + */ +static int hax_setup_qemu_emulator(CPUArchState *env) +{ + +#define HFLAG_COPY_MASK ~( \ + HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ + HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ + HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ + HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) + + uint32_t hflags; + + hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; + hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); + hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & + (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); + hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); + hflags |= (env->cr[4] & CR4_OSFXSR_MASK) << + (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT); + + if (env->efer & MSR_EFER_LMA) { + hflags |= HF_LMA_MASK; + } + + if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { + hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; + } else { + hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> + (DESC_B_SHIFT - HF_CS32_SHIFT); + hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> + (DESC_B_SHIFT - HF_SS32_SHIFT); + if (!(env->cr[0] & CR0_PE_MASK) || + (env->eflags & VM_MASK) || + !(hflags & HF_CS32_MASK)) { + hflags |= HF_ADDSEG_MASK; + } else { + hflags |= ((env->segs[R_DS].base | + env->segs[R_ES].base | + env->segs[R_SS].base) != 0) << + HF_ADDSEG_SHIFT; + } + } + env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags; + return 0; +} + +static int hax_sync_vcpu_register(CPUArchState *env, int set) +{ + struct vcpu_state_t regs; + int ret; + memset(®s, 0, sizeof(struct vcpu_state_t)); + + if (!set) + { + ret = hax_sync_vcpu_state(env, ®s, 0); + if (ret < 0) + return -1; + } + + /*generic register */ + hax_getput_reg(®s._rax, &env->regs[R_EAX], set); + hax_getput_reg(®s._rbx, 
&env->regs[R_EBX], set); + hax_getput_reg(®s._rcx, &env->regs[R_ECX], set); + hax_getput_reg(®s._rdx, &env->regs[R_EDX], set); + hax_getput_reg(®s._rsi, &env->regs[R_ESI], set); + hax_getput_reg(®s._rdi, &env->regs[R_EDI], set); + hax_getput_reg(®s._rsp, &env->regs[R_ESP], set); + hax_getput_reg(®s._rbp, &env->regs[R_EBP], set); + + hax_getput_reg(®s._rflags, &env->eflags, set); + hax_getput_reg(®s._rip, &env->eip, set); + + if (set) + { + + regs._cr0 = env->cr[0]; + regs._cr2 = env->cr[2]; + regs._cr3 = env->cr[3]; + regs._cr4 = env->cr[4]; + hax_set_segments(env, ®s); + } + else + { + env->cr[0] = regs._cr0; + env->cr[2] = regs._cr2; + env->cr[3] = regs._cr3; + env->cr[4] = regs._cr4; + hax_get_segments(env, ®s); + } + + if (set) + { + ret = hax_sync_vcpu_state(env, ®s, 1); + if (ret < 0) + return -1; + } + if (!set) + hax_setup_qemu_emulator(env); + return 0; +} + +static void hax_msr_entry_set(struct vmx_msr *item, + uint32_t index, uint64_t value) +{ + item->entry = index; + item->value = value; +} + +static int hax_get_msrs(CPUArchState *env) +{ + struct hax_msr_data md; + struct vmx_msr *msrs = md.entries; + int ret, i, n; + + n = 0; + msrs[n++].entry = MSR_IA32_SYSENTER_CS; + msrs[n++].entry = MSR_IA32_SYSENTER_ESP; + msrs[n++].entry = MSR_IA32_SYSENTER_EIP; + msrs[n++].entry = MSR_IA32_TSC; + md.nr_msr = n; + ret = hax_sync_msr(env, &md, 0); + if (ret < 0) + return ret; + + for (i = 0; i < md.done; i++) { + switch (msrs[i].entry) { + case MSR_IA32_SYSENTER_CS: + env->sysenter_cs = msrs[i].value; + break; + case MSR_IA32_SYSENTER_ESP: + env->sysenter_esp = msrs[i].value; + break; + case MSR_IA32_SYSENTER_EIP: + env->sysenter_eip = msrs[i].value; + break; + case MSR_IA32_TSC: + env->tsc = msrs[i].value; + break; + } + } + + return 0; +} + +static int hax_set_msrs(CPUArchState *env) +{ + struct hax_msr_data md; + struct vmx_msr *msrs; + msrs = md.entries; + int n = 0; + + memset(&md, 0, sizeof(struct hax_msr_data)); + hax_msr_entry_set(&msrs[n++], 
MSR_IA32_SYSENTER_CS, env->sysenter_cs); + hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp); + hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip); + hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc); + md.nr_msr = n; + md.done = 0; + + return hax_sync_msr(env, &md, 1); + +} + +static int hax_get_fpu(CPUArchState *env) +{ + struct fx_layout fpu; + int i, ret; + + ret = hax_sync_fpu(env, &fpu, 0); + if (ret < 0) + return ret; + + env->fpstt = (fpu.fsw >> 11) & 7; + env->fpus = fpu.fsw; + env->fpuc = fpu.fcw; + for (i = 0; i < 8; ++i) + env->fptags[i] = !((fpu.ftw >> i) & 1); + memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs)); + + memcpy(env->xmm_regs, fpu.mmx_1, sizeof(fpu.mmx_1)); + memcpy((XMMReg *)(env->xmm_regs) + 8, fpu.mmx_2, sizeof(fpu.mmx_2)); + env->mxcsr = fpu.mxcsr; + + return 0; +} + +static int hax_set_fpu(CPUArchState *env) +{ + struct fx_layout fpu; + int i; + + memset(&fpu, 0, sizeof(fpu)); + fpu.fsw = env->fpus & ~(7 << 11); + fpu.fsw |= (env->fpstt & 7) << 11; + fpu.fcw = env->fpuc; + + for (i = 0; i < 8; ++i) + fpu.ftw |= (!env->fptags[i]) << i; + + memcpy(fpu.st_mm, env->fpregs, sizeof (env->fpregs)); + memcpy(fpu.mmx_1, env->xmm_regs, sizeof (fpu.mmx_1)); + memcpy(fpu.mmx_2, (XMMReg *)(env->xmm_regs) + 8, sizeof (fpu.mmx_2)); + + fpu.mxcsr = env->mxcsr; + + return hax_sync_fpu(env, &fpu, 1); +} + +static int hax_arch_get_registers(CPUArchState *env) +{ + int ret; + + ret = hax_sync_vcpu_register(env, 0); + if (ret < 0) + return ret; + + ret = hax_get_fpu(env); + if (ret < 0) + return ret; + + ret = hax_get_msrs(env); + if (ret < 0) + return ret; + + return 0; +} + +static int hax_arch_set_registers(CPUArchState *env) +{ + int ret; + ret = hax_sync_vcpu_register(env, 1); + + if (ret < 0) + { + dprint("Failed to sync vcpu reg\n"); + return ret; + } + ret = hax_set_fpu(env); + if (ret < 0) + { + dprint("FPU failed\n"); + return ret; + } + ret = hax_set_msrs(env); + if (ret < 0) + { + dprint("MSR 
failed\n"); + return ret; + } + + return 0; +} + +static void hax_vcpu_sync_state(CPUArchState *env, int modified) +{ + if (hax_enabled()) { + if (modified) + hax_arch_set_registers(env); + else + hax_arch_get_registers(env); + } +} + +/* + * much simpler than kvm, at least in first stage because: + * We don't need consider the device pass-through, we don't need + * consider the framebuffer, and we may even remove the bios at all + */ +int hax_sync_vcpus(void) +{ + if (hax_enabled()) + { + CPUState *cpu; + + cpu = first_cpu; + if (!cpu) + return 0; + + for (; cpu != NULL; cpu = CPU_NEXT(cpu)) { + int ret; + + ret = hax_arch_set_registers(cpu->env_ptr); + if (ret < 0) + { + dprint("Failed to sync HAX vcpu context\n"); + exit(1); + } + } + } + + return 0; +} +void hax_reset_vcpu_state(void *opaque) +{ + CPUState *cpu; + for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) + { + dprint("*********ReSet hax_vcpu->emulation_state \n"); + cpu->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL; + cpu->hax_vcpu->tunnel->user_event_pending = 0; + cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0; + } +} + ++#define TYPE_HAX_ACCEL ACCEL_CLASS_NAME("hax") ++ ++static void hax_accel_class_init(ObjectClass *oc, void *data) ++{ ++ AccelClass *ac = ACCEL_CLASS(oc); ++ ac->name = "HAX"; ++ ac->init_machine = hax_init; ++ ac->allowed = &hax_allowed; ++} ++ ++static const TypeInfo hax_accel_type = { ++ .name = TYPE_HAX_ACCEL, ++ .parent = TYPE_ACCEL, ++ .class_init = hax_accel_class_init, ++// .instance_size = sizeof(hax_state), ++}; ++ ++static void hax_type_init(void) ++{ ++ type_register_static(&hax_accel_type); ++} ++ ++type_init(hax_type_init); diff --cc target-i386/translate.c index 79964db271,782f7d2666..78ffd481db --- a/target-i386/translate.c +++ b/target-i386/translate.c @@@ -27,11 -27,13 +27,14 @@@ #include "cpu.h" #include "disas/disas.h" #include "tcg-op.h" + #include "exec/cpu_ldst.h" + + #include "exec/helper-proto.h" + #include "exec/helper-gen.h" + + 
#include "trace-tcg.h" - #include "helper.h" - #define GEN_HELPER 1 - #include "helper.h" +#include "sysemu/hax.h" #define PREFIX_REPZ 0x01 #define PREFIX_REPNZ 0x02 diff --cc tizen/src/display/maru_sdl.c index adad35ec42,0000000000..eb13dc726e mode 100644,000000..100644 --- a/tizen/src/display/maru_sdl.c +++ b/tizen/src/display/maru_sdl.c @@@ -1,660 -1,0 +1,664 @@@ +/* + * SDL_WINDOWID hack + * + * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd. All rights reserved. + * + * Contact: + * Jinhyung Jo + * GiWoong Kim + * SeokYeon Hwang + * YeongKyoon Lee + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Contributors: + * - S-Core Co., Ltd + * + */ + + +#include +#include "qemu/main-loop.h" +#include "emulator.h" +#include "emul_state.h" +#include "maru_display.h" +#include "maru_sdl_processing.h" +#include "hw/pci/maru_brightness.h" +#include "debug_ch.h" + +#include "eventcast/encode_fb.h" + +#include +#ifndef CONFIG_WIN32 +#include +#endif + +MULTI_DEBUG_CHANNEL(tizen, sdl); + +static DisplayChangeListener *dcl; + +static QEMUBH *sdl_init_bh; +static QEMUBH *sdl_resize_bh; +static QEMUBH *sdl_update_bh; +static DisplaySurface *dpy_surface; + +static SDL_Surface *surface_screen; +static SDL_Surface *surface_qemu; +static SDL_Surface *scaled_screen; +static SDL_Surface *rotated_screen; +static SDL_Surface *surface_guide; /* blank guide image */ + +static double current_scale_factor = 1.0; +static double current_screen_degree; +static pixman_filter_t sdl_pixman_filter; + +static bool sdl_invalidate; +static int sdl_alteration; + +static unsigned int sdl_skip_update; +static unsigned int sdl_skip_count; + +static bool blank_guide_enable; +static unsigned int blank_cnt; +#define MAX_BLANK_FRAME_CNT 10 +#define BLANK_GUIDE_IMAGE_PATH "../images/" +#define BLANK_GUIDE_IMAGE_NAME "blank-guide.png" + +#ifdef SDL_THREAD +QemuMutex sdl_mutex; +QemuCond sdl_cond; +static int sdl_thread_initialized; + +QemuThread sdl_thread; +static bool sdl_thread_exit; +#endif + +#define SDL_FLAGS (SDL_SWSURFACE | SDL_ASYNCBLIT | SDL_NOFRAME) +#define SDL_BPP 32 + +static void qemu_update(void); + +static void qemu_ds_sdl_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + /* call sdl update */ +#ifdef SDL_THREAD + qemu_mutex_lock(&sdl_mutex); + + qemu_cond_signal(&sdl_cond); + + qemu_mutex_unlock(&sdl_mutex); +#else + qemu_update(); +#endif + + set_display_dirty(true); +} + +static void qemu_ds_sdl_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface) +{ + int console_width = 0, console_height = 0; + + sdl_skip_update = 0; + sdl_skip_count = 0; 
+ + if (!new_surface) { + ERR("qemu_ds_sdl_switch : new_surface is NULL\n"); + return; + } + + console_width = surface_width(new_surface); + console_height = surface_height(new_surface); + + INFO("qemu_ds_sdl_switch : (%d, %d)\n", console_width, console_height); + + /* switch */ + dpy_surface = new_surface; + + if (surface_qemu != NULL) { + SDL_FreeSurface(surface_qemu); + surface_qemu = NULL; + } + ++ PixelFormat pf = qemu_pixelformat_from_pixman(dpy_surface->format); ++ + /* create surface_qemu */ + if (console_width == get_emul_resolution_width() && + console_height == get_emul_resolution_height()) { + INFO("create SDL screen : (%d, %d)\n", + console_width, console_height); + + surface_qemu = SDL_CreateRGBSurfaceFrom( + surface_data(dpy_surface), + console_width, console_height, + surface_bits_per_pixel(dpy_surface), + surface_stride(dpy_surface), - dpy_surface->pf.rmask, - dpy_surface->pf.gmask, - dpy_surface->pf.bmask, - dpy_surface->pf.amask); ++ pf.rmask, ++ pf.gmask, ++ pf.bmask, ++ pf.amask); + } else { + INFO("create blank screen : (%d, %d)\n", + get_emul_resolution_width(), get_emul_resolution_height()); + + surface_qemu = SDL_CreateRGBSurface( + SDL_SWSURFACE, + console_width, console_height, + surface_bits_per_pixel(dpy_surface), + 0, 0, 0, 0); + } + + if (surface_qemu == NULL) { + ERR("Unable to set the RGBSurface: %s\n", SDL_GetError()); + return; + } +} + +static SDL_Surface *get_blank_guide_image(void) +{ + if (surface_guide == NULL) { + unsigned int width = 0; + unsigned int height = 0; + char *guide_image_path = NULL; + void *guide_image_data = NULL; + ++ PixelFormat pf = qemu_pixelformat_from_pixman(dpy_surface->format); ++ + /* load png image */ + int path_len = strlen(get_bin_path()) + + strlen(BLANK_GUIDE_IMAGE_PATH) + + strlen(BLANK_GUIDE_IMAGE_NAME) + 1; + guide_image_path = g_malloc0(sizeof(char) * path_len); + snprintf(guide_image_path, path_len, "%s%s%s", + get_bin_path(), BLANK_GUIDE_IMAGE_PATH, + BLANK_GUIDE_IMAGE_NAME); + + 
guide_image_data = (void *) read_png_file( + guide_image_path, &width, &height); + + if (guide_image_data != NULL) { + surface_guide = SDL_CreateRGBSurfaceFrom( + guide_image_data, width, height, + get_emul_sdl_bpp(), width * 4, - dpy_surface->pf.bmask, - dpy_surface->pf.gmask, - dpy_surface->pf.rmask, - dpy_surface->pf.amask); ++ pf.bmask, ++ pf.gmask, ++ pf.rmask, ++ pf.amask); + } else { + ERR("failed to draw a blank guide image\n"); + } + + g_free(guide_image_path); + } + + return surface_guide; +} + +static void qemu_ds_sdl_refresh(DisplayChangeListener *dcl) +{ + if (sdl_alteration == 1) { + sdl_alteration = 0; + sdl_skip_update = 0; + sdl_skip_count = 0; + } + + /* draw cover image */ + if (sdl_skip_update && display_off) { + if (blank_cnt > MAX_BLANK_FRAME_CNT) { +#ifdef CONFIG_WIN32 + if (sdl_invalidate && get_emul_skin_enable()) { + draw_image(surface_screen, get_blank_guide_image()); + } +#endif + + return; + } else if (blank_cnt == MAX_BLANK_FRAME_CNT) { + if (blank_guide_enable == true && get_emul_skin_enable()) { + INFO("draw a blank guide image\n"); + + draw_image(surface_screen, get_blank_guide_image()); + } + } else if (blank_cnt == 0) { + /* If the display is turned off, + the screen does not update until the display is turned on */ + INFO("skipping of the display updating is started\n"); + } + + blank_cnt++; + + return; + } else { + if (blank_cnt != 0) { + INFO("skipping of the display updating is ended\n"); + blank_cnt = 0; + } + } + + /* draw framebuffer */ + if (sdl_invalidate) { + graphic_hw_invalidate(NULL); + } + graphic_hw_update(NULL); + + /* Usually, continuously updated. + When the display is turned off, + ten more updates the screen for a black screen. 
*/ + if (display_off) { + if (++sdl_skip_count > 10) { + sdl_skip_update = 1; + } else { + sdl_skip_update = 0; + } + } else { + sdl_skip_count = 0; + sdl_skip_update = 0; + } + +#ifdef TARGET_ARM +#ifdef SDL_THREAD + qemu_mutex_lock(&sdl_mutex); +#endif + + /* + * It is necessary only for exynos4210 FIMD in connection with + * some WM (xfwm4, for example) + */ + + SDL_UpdateRect(surface_screen, 0, 0, 0, 0); + +#ifdef SDL_THREAD + qemu_mutex_unlock(&sdl_mutex); +#endif +#endif +} + +static DisplayChangeListenerOps dcl_ops = { + .dpy_name = "maru_sdl", + .dpy_gfx_update = qemu_ds_sdl_update, + .dpy_gfx_switch = qemu_ds_sdl_switch, + .dpy_refresh = qemu_ds_sdl_refresh, +}; + +static void qemu_update(void) +{ + if (sdl_alteration < 0) { + SDL_FreeSurface(scaled_screen); + SDL_FreeSurface(rotated_screen); + SDL_FreeSurface(surface_qemu); + surface_qemu = NULL; + + return; + } + + if (surface_qemu != NULL) { + maru_do_pixman_dpy_surface(dpy_surface->image); + + save_screenshot(dpy_surface); + + if (current_scale_factor != 1.0) { + rotated_screen = maru_do_pixman_rotate( + surface_qemu, rotated_screen, + (int)current_screen_degree); + scaled_screen = maru_do_pixman_scale( + rotated_screen, scaled_screen, sdl_pixman_filter); + + SDL_BlitSurface(scaled_screen, NULL, surface_screen, NULL); + } + else {/* current_scale_factor == 1.0 */ + if (current_screen_degree != 0.0) { + rotated_screen = maru_do_pixman_rotate( + surface_qemu, rotated_screen, + (int)current_screen_degree); + + SDL_BlitSurface(rotated_screen, NULL, surface_screen, NULL); + } else { + /* as-is */ + SDL_BlitSurface(surface_qemu, NULL, surface_screen, NULL); + } + } + + /* draw multi-touch finger points */ + MultiTouchState *mts = get_emul_multi_touch_state(); + if (mts->multitouch_enable != 0 && mts->finger_point_surface != NULL) { + int i = 0; + FingerPoint *finger = NULL; + int finger_point_size_half = mts->finger_point_size / 2; + SDL_Rect rect; + + for (i = 0; i < mts->finger_cnt; i++) { + finger = 
get_finger_point_from_slot(i); + if (finger != NULL && finger->id != 0) { + rect.x = finger->origin_x - finger_point_size_half; + rect.y = finger->origin_y - finger_point_size_half; + rect.w = rect.h = mts->finger_point_size; + + SDL_BlitSurface( + (SDL_Surface *)mts->finger_point_surface, + NULL, surface_screen, &rect); + } + } + } /* end of draw multi-touch */ + } + + SDL_UpdateRect(surface_screen, 0, 0, 0, 0); +} + + +#ifdef SDL_THREAD +static void *run_qemu_update(void *arg) +{ + qemu_mutex_lock(&sdl_mutex); + + while (1) { + qemu_cond_wait(&sdl_cond, &sdl_mutex); + if (sdl_thread_exit) { + INFO("make SDL Thread exit\n"); + break; + } + qemu_update(); + } + + qemu_mutex_unlock(&sdl_mutex); + + INFO("finish qemu_update routine\n"); + return NULL; +} +#endif + +static void maru_sdl_update_bh(void *opaque) +{ + graphic_hw_invalidate(NULL); +} + +static void maru_sdl_resize_bh(void *opaque) +{ + int surface_width = 0, surface_height = 0; + int display_width = 0, display_height = 0; + int temp = 0; + + INFO("Set up a video mode with the specified width, " + "height and bits-per-pixel\n"); + + sdl_alteration = 1; + sdl_skip_update = 0; + +#ifdef SDL_THREAD + qemu_mutex_lock(&sdl_mutex); +#endif + + /* get current setting information and calculate screen size */ + display_width = get_emul_resolution_width(); + display_height = get_emul_resolution_height(); + current_scale_factor = get_emul_win_scale(); + + short rotaton_type = get_emul_rotation(); + if (rotaton_type == ROTATION_PORTRAIT) { + current_screen_degree = 0.0; + } else if (rotaton_type == ROTATION_LANDSCAPE) { + current_screen_degree = 90.0; + temp = display_width; + display_width = display_height; + display_height = temp; + } else if (rotaton_type == ROTATION_REVERSE_PORTRAIT) { + current_screen_degree = 180.0; + } else if (rotaton_type == ROTATION_REVERSE_LANDSCAPE) { + current_screen_degree = 270.0; + temp = display_width; + display_width = display_height; + display_height = temp; + } + + surface_width = 
display_width * current_scale_factor; + surface_height = display_height * current_scale_factor; + + surface_screen = SDL_SetVideoMode( + surface_width, surface_height, + get_emul_sdl_bpp(), SDL_FLAGS); + + INFO("SDL_SetVideoMode\n"); + + if (surface_screen == NULL) { + ERR("Could not open SDL display (%dx%dx%d) : %s\n", + surface_width, surface_height, + get_emul_sdl_bpp(), SDL_GetError()); + +#ifdef SDL_THREAD + qemu_mutex_unlock(&sdl_mutex); +#endif + + return; + } + + SDL_UpdateRect(surface_screen, 0, 0, 0, 0); + + /* create buffer for image processing */ + SDL_FreeSurface(scaled_screen); + scaled_screen = SDL_CreateRGBSurface(SDL_SWSURFACE, + surface_width, surface_height, + surface_qemu->format->BitsPerPixel, + surface_qemu->format->Rmask, + surface_qemu->format->Gmask, + surface_qemu->format->Bmask, + surface_qemu->format->Amask); + + SDL_FreeSurface(rotated_screen); + rotated_screen = SDL_CreateRGBSurface(SDL_SWSURFACE, + display_width, display_height, + surface_qemu->format->BitsPerPixel, + surface_qemu->format->Rmask, + surface_qemu->format->Gmask, + surface_qemu->format->Bmask, + surface_qemu->format->Amask); + + /* rearrange multi-touch finger points */ + if (get_emul_multi_touch_state()->multitouch_enable == 1 || + get_emul_multi_touch_state()->multitouch_enable == 2) { + rearrange_finger_points(get_emul_resolution_width(), get_emul_resolution_height(), + current_scale_factor, rotaton_type); + } + +#ifdef SDL_THREAD + qemu_mutex_unlock(&sdl_mutex); +#endif + + graphic_hw_invalidate(NULL); +} + +static void maru_sdl_init_bh(void *opaque) +{ + INFO("SDL_Init\n"); + + if (SDL_Init(SDL_INIT_VIDEO) < 0) { + ERR("unable to init SDL: %s\n", SDL_GetError()); + // TODO: + } + +#ifndef CONFIG_WIN32 + SDL_SysWMinfo info; + SDL_VERSION(&info.version); + SDL_GetWMInfo(&info); +#endif + + sdl_resize_bh = qemu_bh_new(maru_sdl_resize_bh, NULL); + sdl_update_bh = qemu_bh_new(maru_sdl_update_bh, NULL); + qemu_bh_schedule(sdl_resize_bh); + +#ifdef SDL_THREAD + if 
(sdl_thread_initialized == 0) { + sdl_thread_initialized = 1; + + INFO("sdl update thread create\n"); + + sdl_thread_exit = false; + qemu_thread_create(&sdl_thread, "sdl-workthread", run_qemu_update, + NULL, QEMU_THREAD_JOINABLE); + } +#endif +} + +static void maru_sdl_quit(void) +{ + INFO("maru sdl quit\n"); + + if (surface_guide != NULL) { + g_free(surface_guide->pixels); + SDL_FreeSurface(surface_guide); + } + + /* remove multi-touch finger points */ + cleanup_multi_touch_state(); + + if (sdl_init_bh != NULL) { + qemu_bh_delete(sdl_init_bh); + } + if (sdl_resize_bh != NULL) { + qemu_bh_delete(sdl_resize_bh); + } + + sdl_alteration = -1; + +#ifdef SDL_THREAD + qemu_mutex_lock(&sdl_mutex); +#endif + + SDL_Quit(); + +#ifdef SDL_THREAD + sdl_thread_exit = true; + qemu_cond_signal(&sdl_cond); + qemu_mutex_unlock(&sdl_mutex); + + INFO("join SDL thread\n"); + qemu_thread_join(&sdl_thread); + + INFO("destroy cond and mutex of SDL thread\n"); + qemu_cond_destroy(&sdl_cond); + qemu_mutex_destroy(&sdl_mutex); +#endif + + unregister_displaychangelistener(dcl); + g_free(dcl); +} + +static void maru_sdl_resize(void) +{ + INFO("maru sdl resize\n"); + + if (sdl_resize_bh != NULL) { + qemu_bh_schedule(sdl_resize_bh); + } +} + +static void maru_sdl_update(void) +{ + if (sdl_update_bh != NULL) { + qemu_bh_schedule(sdl_update_bh); + } +} + +static void maru_sdl_set_invalidate(bool on) +{ + sdl_invalidate = on; +} + +static void maru_sdl_set_interpolation(bool on) +{ + if (on == true) { + INFO("set PIXMAN_FILTER_BEST filter for image processing\n"); + + /* PIXMAN_FILTER_BILINEAR */ + sdl_pixman_filter = PIXMAN_FILTER_BEST; + } else { + INFO("set PIXMAN_FILTER_FAST filter for image processing\n"); + + /* PIXMAN_FILTER_NEAREST */ + sdl_pixman_filter = PIXMAN_FILTER_FAST; + } +} + +static void maru_sdl_init(uint64 swt_handle, + unsigned int display_width, unsigned int display_height, + bool blank_guide) +{ + gchar SDL_windowhack[32] = { 0, }; + long window_id = swt_handle; + 
blank_guide_enable = blank_guide; + + INFO("maru sdl init\n"); + + sprintf(SDL_windowhack, "%ld", window_id); + g_setenv("SDL_WINDOWID", SDL_windowhack, 1); + + INFO("register SDL environment variable. " + "(SDL_WINDOWID = %s)\n", SDL_windowhack); + + set_emul_resolution(display_width, display_height); + set_emul_sdl_bpp(SDL_BPP); + maru_sdl_set_interpolation(false); + init_multi_touch_state(); + + if (blank_guide_enable == true) { + INFO("blank guide is on\n"); + } + + qemu_bh_schedule(sdl_init_bh); +} + +void maru_sdl_pre_init(MaruDisplayChangeListener *mdcl) +{ + dcl = g_malloc0(sizeof(DisplayChangeListener)); + dcl->ops = &dcl_ops; + + mdcl->surface_init = maru_sdl_init; + mdcl->fini = maru_sdl_quit; + + mdcl->resize = maru_sdl_resize; + mdcl->update = maru_sdl_update; + mdcl->set_invalidate = maru_sdl_set_invalidate; + mdcl->set_interpolation = maru_sdl_set_interpolation; + + sdl_init_bh = qemu_bh_new(maru_sdl_init_bh, NULL); + +#ifdef SDL_THREAD + qemu_mutex_init(&sdl_mutex); + qemu_cond_init(&sdl_cond); +#endif + + register_displaychangelistener(dcl); +} + +bool maru_extract_framebuffer(void *buffer) +{ + uint32_t buffer_size = 0; + + if (!buffer) { + ERR("given buffer is null\n"); + return false; + } + + if (!surface_qemu) { + ERR("surface_qemu is null\n"); + return false; + } + + maru_do_pixman_dpy_surface(dpy_surface->image); + + buffer_size = surface_stride(dpy_surface) * surface_height(dpy_surface); + TRACE("extract framebuffer %d\n", buffer_size); + + memcpy(buffer, surface_data(dpy_surface), buffer_size); + return true; +} diff --cc tizen/src/ecs/ecs.c index 51a9a6a6de,0000000000..804e6c10be mode 100644,000000..100644 --- a/tizen/src/ecs/ecs.c +++ b/tizen/src/ecs/ecs.c @@@ -1,864 -1,0 +1,864 @@@ +/* + * Emulator Control Server + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd All Rights Reserved + * + * Contact: + * Jinhyung choi + * MunKyu Im + * Daiyoung Kim + * YeongKyoon Lee + * + * This program is free software; you can redistribute it 
and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contributors: + * - S-Core Co., Ltd + * + */ + +#include +#include + +#include "hw/qdev.h" +#include "net/net.h" +#include "ui/console.h" + +#include "qemu-common.h" +#include "qemu/queue.h" +#include "qemu/sockets.h" +#include "qemu/option.h" +#include "qemu/timer.h" +#include "qemu/main-loop.h" +#include "sysemu/char.h" +#include "config.h" +#include "qapi/qmp/qint.h" + +#include "emulator.h" +#include "util/sdb.h" +#include "ecs.h" +#include "emul_state.h" + +#include "genmsg/ecs.pb-c.h" + +#include "debug_ch.h" +MULTI_DEBUG_CHANNEL(qemu, ecs); + +#define DEBUG + +#ifndef min +#define min(a,b) ((a)<(b)?(a):(b)) +#endif + +static QTAILQ_HEAD(ECS_ClientHead, ECS_Client) +clients = QTAILQ_HEAD_INITIALIZER(clients); + +static ECS_State *current_ecs; + +static void* keepalive_buf; +static int payloadsize; + +static int g_client_id = 1; + +static QemuMutex mutex_clilist; +QemuMutex mutex_guest_connection; +QemuMutex mutex_location_data; + +static QemuThread ecs_thread_id; + +static int suspend_state = 1; + +void ecs_set_suspend_state(int state) +{ + suspend_state = state; +} + +int ecs_get_suspend_state(void) +{ + return suspend_state; +} + +int ecs_write(int fd, const uint8_t *buf, int len) { + TRACE("write buflen : %d, buf : %s\n", len, (char*)buf); + if (fd < 0) { + return -1; + } + + return 
send_all(fd, buf, len); +} + +void ecs_client_close(ECS_Client* clii) { + if (clii == NULL) + return; + + qemu_mutex_lock(&mutex_clilist); + + if (clii->client_fd > 0) { + INFO("ecs client closed with fd: %d\n", clii->client_fd); + closesocket(clii->client_fd); +#ifndef CONFIG_LINUX + FD_CLR(clii->client_fd, &clii->cs->reads); +#endif + clii->client_fd = -1; + } + + QTAILQ_REMOVE(&clients, clii, next); + + g_free(clii); + clii = NULL; + + qemu_mutex_unlock(&mutex_clilist); +} + +bool send_to_all_client(const char* data, const int len) { + TRACE("data len: %d, data: %s\n", len, data); + qemu_mutex_lock(&mutex_clilist); + + ECS_Client *clii,*next; + + QTAILQ_FOREACH_SAFE(clii, &clients, next, next) + { + send_to_client(clii->client_fd, data, len); + } + qemu_mutex_unlock(&mutex_clilist); + + return true; +} + +void send_to_single_client(ECS_Client *clii, const char* data, const int len) +{ + qemu_mutex_lock(&mutex_clilist); + send_to_client(clii->client_fd, data, len); + qemu_mutex_unlock(&mutex_clilist); +} + +void send_to_client(int fd, const char* data, const int len) +{ + ecs_write(fd, (const uint8_t*) data, len); +} + +void read_val_short(const char* data, unsigned short* ret_val) { + memcpy(ret_val, data, sizeof(unsigned short)); +} + +void read_val_char(const char* data, unsigned char* ret_val) { + memcpy(ret_val, data, sizeof(unsigned char)); +} + +void read_val_str(const char* data, char* ret_val, int len) { + memcpy(ret_val, data, len); +} + +bool ntf_to_control(const char* data, const int len) { + return true; +} + +bool ntf_to_monitor(const char* data, const int len) { + return true; +} + +void print_binary(const char* data, const int len) { + int i; + printf("[DATA: "); + for(i = 0; i < len; i++) { + if(i == len - 1) { + printf("%02x]\n", data[i]); + } else { + printf("%02x,", data[i]); + } + } +} + +void ecs_make_header(QDict* obj, type_length length, type_group group, + type_action action) { + qdict_put(obj, "length", qint_from_int((int64_t )length)); 
+ qdict_put(obj, "group", qint_from_int((int64_t )group)); + qdict_put(obj, "action", qint_from_int((int64_t )action)); +} + +static Monitor *monitor_create(void) { + Monitor *mon; + + mon = g_malloc0(sizeof(*mon)); + if (NULL == mon) { + ERR("monitor allocation failed.\n"); + return NULL; + } + + return mon; +} + +static void ecs_close(ECS_State *cs) { + ECS_Client *clii, *next; + INFO("### Good bye! ECS ###\n"); + + if (cs == NULL) + return; + + if (0 <= cs->listen_fd) { + INFO("close listen_fd: %d\n", cs->listen_fd); + closesocket(cs->listen_fd); + cs->listen_fd = -1; + } + + if (cs->mon != NULL) { + g_free(cs->mon); + cs->mon = NULL; + } + + if (keepalive_buf) { + g_free(keepalive_buf); + } + + if (cs->alive_timer != NULL) { + timer_del(cs->alive_timer); + cs->alive_timer = NULL; + } + + QTAILQ_FOREACH_SAFE(clii, &clients, next, next) + { + ecs_client_close(clii); + } + + g_free(cs); + cs = NULL; + current_ecs = NULL; + + qemu_mutex_destroy(&mutex_clilist); + qemu_mutex_destroy(&mutex_guest_connection); + qemu_mutex_destroy(&mutex_location_data); +} + +#ifndef _WIN32 +static ssize_t ecs_recv(int fd, char *buf, size_t len) { + struct msghdr msg = { NULL, }; + struct iovec iov[1]; + union { + struct cmsghdr cmsg; + char control[CMSG_SPACE(sizeof(int))]; + } msg_control; + int flags = 0; + + iov[0].iov_base = buf; + iov[0].iov_len = len; + + msg.msg_iov = iov; + msg.msg_iovlen = 1; + msg.msg_control = &msg_control; + msg.msg_controllen = sizeof(msg_control); + +#ifdef MSG_CMSG_CLOEXEC + flags |= MSG_CMSG_CLOEXEC; +#endif + return recvmsg(fd, &msg, flags); +} + +#else +static ssize_t ecs_recv(int fd, char *buf, size_t len) +{ + return qemu_recv(fd, buf, len, 0); +} +#endif + + +static void reset_sbuf(sbuf* sbuf) +{ + memset(sbuf->_buf, 0, 4096); + sbuf->_use = 0; + sbuf->_netlen = 0; +} + +static void ecs_read(ECS_Client *cli) { + + int read = 0; + int to_read_bytes = 0; + + if (cli == NULL) + { + ERR("client is null.\n"); + return; + } +#ifndef __WIN32 + if 
(ioctl(cli->client_fd, FIONREAD, &to_read_bytes) < 0) + { + ERR("ioctl failed\n"); + return; + } +#else + unsigned long to_read_bytes_long = 0; + if (ioctlsocket(cli->client_fd, FIONREAD, &to_read_bytes_long) < 0) + { + ERR("ioctl failed\n"); + return; + } + to_read_bytes = (int)to_read_bytes_long; +#endif + + if (to_read_bytes == 0) { + TRACE("ioctl FIONREAD: 0\n"); + goto fail; + } + + if (cli->sbuf._netlen == 0) + { + if (to_read_bytes < 4) + { + //LOG("insufficient data size to read\n"); + return; + } + + long payloadsize = 0; + read = ecs_recv(cli->client_fd, (char*) &payloadsize, 4); + + if (read < 4) + { + ERR("insufficient header size\n"); + goto fail; + } + + payloadsize = ntohl(payloadsize); + + cli->sbuf._netlen = payloadsize; + + TRACE("payload size: %ld\n", payloadsize); + + to_read_bytes -= 4; + } + + if (to_read_bytes == 0) + return; + + + to_read_bytes = min(to_read_bytes, cli->sbuf._netlen - cli->sbuf._use); + + read = ecs_recv(cli->client_fd, (char*)(cli->sbuf._buf + cli->sbuf._use), to_read_bytes); + if (read == 0) + goto fail; + + + cli->sbuf._use += read; + + + if (cli->sbuf._netlen == cli->sbuf._use) + { + handle_protobuf_msg(cli, (char*)cli->sbuf._buf, cli->sbuf._use); + reset_sbuf(&cli->sbuf); + } + + return; +fail: + ecs_client_close(cli); +} + +#ifdef CONFIG_LINUX +static void epoll_cli_add(ECS_State *cs, int fd) { + struct epoll_event events; + + /* event control set for read event */ + events.events = EPOLLIN; + events.data.fd = fd; + + if (epoll_ctl(cs->epoll_fd, EPOLL_CTL_ADD, fd, &events) < 0) { + ERR("Epoll control fails.in epoll_cli_add.\n"); + } +} +#endif + +static ECS_Client *ecs_find_client(int fd) { + ECS_Client *clii, *next; + + QTAILQ_FOREACH_SAFE(clii, &clients, next, next) + { + if (clii->client_fd == fd) + return clii; + } + return NULL; +} + +ECS_Client *find_client(unsigned char id, unsigned char type) { + ECS_Client *clii, *next; + + QTAILQ_FOREACH_SAFE(clii, &clients, next, next) + { + if (clii->client_id == id && 
clii->client_type == type) + return clii; + } + return NULL; +} + +static int ecs_add_client(ECS_State *cs, int fd) { + + ECS_Client *clii = g_malloc0(sizeof(ECS_Client)); + if (NULL == clii) { + ERR("ECS_Client allocation failed.\n"); + return -1; + } + + reset_sbuf(&clii->sbuf); + + qemu_set_nonblock(fd); + + clii->client_fd = fd; + clii->cs = cs; + clii->client_type = TYPE_NONE; + + ecs_json_message_parser_init(&clii->parser, handle_qmp_command, clii); + +#ifdef CONFIG_LINUX + epoll_cli_add(cs, fd); +#else + FD_SET(fd, &cs->reads); +#endif + + qemu_mutex_lock(&mutex_clilist); + + QTAILQ_INSERT_TAIL(&clients, clii, next); + + TRACE("Add an ecs client. fd: %d\n", fd); + + qemu_mutex_unlock(&mutex_clilist); + +// send_ecs_version_check(clii); + + return 0; +} + +static void ecs_accept(ECS_State *cs) { + struct sockaddr_in saddr; +#ifndef _WIN32 + struct sockaddr_un uaddr; +#endif + struct sockaddr *addr; + socklen_t len; + int fd; + + for (;;) { +#ifndef _WIN32 + if (cs->is_unix) { + len = sizeof(uaddr); + addr = (struct sockaddr *) &uaddr; + } else +#endif + { + len = sizeof(saddr); + addr = (struct sockaddr *) &saddr; + } + fd = qemu_accept(cs->listen_fd, addr, &len); + if (0 > fd && EINTR != errno) { + return; + } else if (0 <= fd) { + break; + } + } + if (0 > ecs_add_client(cs, fd)) { + ERR("failed to add client.\n"); + } +} + +#ifdef CONFIG_LINUX +static void epoll_init(ECS_State *cs) { + struct epoll_event events; + + cs->epoll_fd = epoll_create(MAX_EVENTS); + if (cs->epoll_fd < 0) { + closesocket(cs->listen_fd); + } + + events.events = EPOLLIN; + events.data.fd = cs->listen_fd; + + if (epoll_ctl(cs->epoll_fd, EPOLL_CTL_ADD, cs->listen_fd, &events) < 0) { + close(cs->listen_fd); + close(cs->epoll_fd); + } +} +#endif + +static void send_keep_alive_msg(ECS_Client *clii) { + send_to_single_client(clii, keepalive_buf, payloadsize); +} + +static void make_keep_alive_msg(void) { + int len_pack = 0; + char msg [5] = {'s','e','l','f'}; + + ECS__Master master = 
ECS__MASTER__INIT; + ECS__KeepAliveReq req = ECS__KEEP_ALIVE_REQ__INIT; + + req.time_str = (char*) g_malloc(5); + + strncpy(req.time_str, msg, 4); + + master.type = ECS__MASTER__TYPE__KEEPALIVE_REQ; + master.keepalive_req = &req; + + len_pack = ecs__master__get_packed_size(&master); + payloadsize = len_pack + 4; + + keepalive_buf = g_malloc(len_pack + 4); + if (!keepalive_buf) { + ERR("keep alive message creation is failed.\n"); + return; + } + + ecs__master__pack(&master, keepalive_buf + 4); + + len_pack = htonl(len_pack); + memcpy(keepalive_buf, &len_pack, 4); +} + +static void alive_checker(void *opaque) { + + ECS_Client *clii, *next; + + if (NULL != current_ecs && !current_ecs->ecs_running) { + return; + } + + QTAILQ_FOREACH_SAFE(clii, &clients, next, next) + { + if (1 == clii->keep_alive) { + INFO("get client fd %d - keep alive fail\n", clii->client_fd); + ecs_client_close(clii); + continue; + } + TRACE("set client fd %d - keep alive 1\n", clii->client_fd); + clii->keep_alive = 1; + send_keep_alive_msg(clii); + } + + if (current_ecs == NULL) { + ERR("alive checking is failed because current ecs is null.\n"); + return; + } + + timer_mod(current_ecs->alive_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() * TIMER_ALIVE_S); + +} + +static int socket_initialize(ECS_State *cs, QemuOpts *opts) { + int fd = -1; + Error *local_err = NULL; + + fd = inet_listen_opts(opts, 0, &local_err); - if (0 > fd || error_is_set(&local_err)) { ++ if (0 > fd || local_err) { + qerror_report_err(local_err); + error_free(local_err); + return -1; + } + + INFO("Listen fd is %d\n", fd); + + qemu_set_nonblock(fd); + + cs->listen_fd = fd; + +#ifdef CONFIG_LINUX + epoll_init(cs); +#else + FD_ZERO(&cs->reads); + FD_SET(fd, &cs->reads); +#endif + + make_keep_alive_msg(); + + cs->alive_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, alive_checker, cs); + + timer_mod(cs->alive_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() * TIMER_ALIVE_S); + + return 0; +} + 
+#ifdef CONFIG_LINUX +static int ecs_loop(ECS_State *cs) { + int i, nfds; + + nfds = epoll_wait(cs->epoll_fd, cs->events, MAX_EVENTS, 100); + if (0 == nfds) { + return 0; + } + + if (0 > nfds) { + if (errno == EINTR) + return 0; + perror("epoll wait error"); + return -1; + } + + for (i = 0; i < nfds; i++) { + if (cs->events[i].data.fd == cs->listen_fd) { + ecs_accept(cs); + continue; + } + ecs_read(ecs_find_client(cs->events[i].data.fd)); + } + + return 0; +} +#elif defined(CONFIG_WIN32) +static int ecs_loop(ECS_State *cs) +{ + int index = 0; + TIMEVAL timeout; + fd_set temps = cs->reads; + + timeout.tv_sec = 5; + timeout.tv_usec = 0; + + if (select(0, &temps, 0, 0, &timeout) < 0) { + ERR("select error.\n"); + return -1; + } + + for (index = 0; index < cs->reads.fd_count; index++) { + if (cs->reads.fd_array == NULL) + continue; + if (FD_ISSET(cs->reads.fd_array[index], &temps)) { + if (cs->reads.fd_array[index] == cs->listen_fd) { + ecs_accept(cs); + continue; + } + ecs_read(ecs_find_client(cs->reads.fd_array[index])); + } + } + return 0; +} +#elif defined(CONFIG_DARWIN) +static int ecs_loop(ECS_State *cs) +{ + int index = 0; + int res = 0; + struct timeval timeout; + fd_set temps = cs->reads; + + timeout.tv_sec = 5; + timeout.tv_usec = 0; + + if ((res = select(MAX_FD_NUM + 1, &temps, NULL, NULL, &timeout)) < 0) { + ERR("select failed..\n"); + return -1; + } + + for (index = 0; index < MAX_FD_NUM; index ++) { + if (FD_ISSET(index, &temps)) { + if (index == cs->listen_fd) { + ecs_accept(cs); + continue; + } + + ecs_read(ecs_find_client(index)); + } + } + + return 0; +} + +#endif + +static void* ecs_initialize(void* args) { + int ret = 1; + ECS_State *cs = NULL; + QemuOpts *opts = NULL; + Error *local_err = NULL; + Monitor* mon = NULL; + char host_port[16]; + int port = 0; + + INFO("ecs starts initializing.\n"); + + opts = qemu_opts_create(qemu_find_opts(ECS_OPTS_NAME), ECS_OPTS_NAME, 1, &local_err); - if (error_is_set(&local_err)) { ++ if (local_err) { + 
qerror_report_err(local_err); + error_free(local_err); + return NULL; + } + + qemu_opt_set(opts, "host", HOST_LISTEN_ADDR); + + cs = g_malloc0(sizeof(ECS_State)); + if (NULL == cs) { + ERR("ECS_State allocation failed.\n"); + return NULL; + } + port = get_emul_ecs_port(); + INFO("ecs port: %d\n", port); + sprintf(host_port, "%d", port); + + qemu_opt_set(opts, "port", host_port); + ret = socket_initialize(cs, opts); + if (ret < 0) { + ERR("Socket initialization is failed.\n"); + ecs_close(cs); + return NULL; + } + + mon = monitor_create(); + if (NULL == mon) { + ERR("monitor initialization failed.\n"); + ecs_close(cs); + return NULL; + } + + cs->mon = mon; + current_ecs = cs; + cs->ecs_running = 1; + + qemu_mutex_init(&mutex_clilist); + qemu_mutex_init(&mutex_guest_connection); + qemu_mutex_init(&mutex_location_data); + + TRACE("ecs_loop entered.\n"); + while (cs->ecs_running) { + ret = ecs_loop(cs); + if (0 > ret) { + break; + } + } + TRACE("ecs_loop exited.\n"); + + ecs_close(cs); + + return NULL; +} + +static int stop_ecs(void) { + void *ret = NULL; + + INFO("ecs is closing.\n"); + if (NULL != current_ecs) { + current_ecs->ecs_running = 0; + } + + ret = qemu_thread_join(&ecs_thread_id); + if (ret) { + ERR("ecs is failed to join thread.\n"); + return -1; + } + + return 0; +} + +static void ecs_notify_exit(Notifier *notifier, void *data) { + stop_ecs(); +} +static Notifier ecs_exit = { .notify = ecs_notify_exit }; + +int start_ecs(void) { + qemu_add_opts(&qemu_ecs_opts); + + qemu_thread_create(&ecs_thread_id, "ecs", ecs_initialize, NULL, QEMU_THREAD_JOINABLE); + + emulator_add_exit_notifier(&ecs_exit); + + return 0; +} + +bool handle_protobuf_msg(ECS_Client* cli, char* data, int len) +{ + ECS__Master* master = ecs__master__unpack(NULL, (size_t)len, (const uint8_t*)data); + if (!master) + return false; + + if (master->type == ECS__MASTER__TYPE__INJECTOR_REQ) + { + ECS__InjectorReq* msg = master->injector_req; + if (!msg) + goto fail; + msgproc_injector_req(cli, 
msg); + } + else if (master->type == ECS__MASTER__TYPE__MONITOR_REQ) + { + ECS__MonitorReq* msg = master->monitor_req; + if (!msg) + goto fail; + msgproc_monitor_req(cli, msg); + } + else if (master->type == ECS__MASTER__TYPE__DEVICE_REQ) + { + cli->client_type = TYPE_ECP; + ECS__DeviceReq* msg = master->device_req; + if (!msg) + goto fail; + msgproc_device_req(cli, msg); + } + else if (master->type == ECS__MASTER__TYPE__NFC_REQ) + { + ECS__NfcReq* msg = master->nfc_req; + if (!msg) + goto fail; + + qemu_mutex_lock(&mutex_clilist); + if(cli->client_type == TYPE_NONE) { + if (!strncmp(msg->category, MSG_TYPE_NFC, 3)) { + QTAILQ_REMOVE(&clients, cli, next); + cli->client_type = TYPE_ECP; + if(g_client_id > 255) { + g_client_id = 1; + } + cli->client_id = g_client_id++; + + QTAILQ_INSERT_TAIL(&clients, cli, next); + } + else if (!strncmp(msg->category, MSG_TYPE_SIMUL_NFC, 9)) { + QTAILQ_REMOVE(&clients, cli, next); + cli->client_type = TYPE_SIMUL_NFC; + if(g_client_id > 255) { + g_client_id = 1; + } + cli->client_id = g_client_id++; + QTAILQ_INSERT_TAIL(&clients, cli, next); + } + else { + ERR("unsupported category is found: %s\n", msg->category); + qemu_mutex_unlock(&mutex_clilist); + goto fail; + } + } + qemu_mutex_unlock(&mutex_clilist); + + msgproc_nfc_req(cli, msg); + } +#if 0 + else if (master->type == ECS__MASTER__TYPE__CHECKVERSION_REQ) + { + ECS__CheckVersionReq* msg = master->checkversion_req; + if (!msg) + goto fail; + msgproc_checkversion_req(cli, msg); + } +#endif + else if (master->type == ECS__MASTER__TYPE__KEEPALIVE_ANS) + { + ECS__KeepAliveAns* msg = master->keepalive_ans; + if (!msg) + goto fail; + msgproc_keepalive_ans(cli, msg); + } + else if (master->type == ECS__MASTER__TYPE__EVENTCAST_REQ) + { + ECS__EventCastReq* msg = master->eventcast_req; + if (!msg) + goto fail; + msgproc_eventcast_req(cli, msg); + } + + ecs__master__free_unpacked(master, NULL); + return true; +fail: + ERR("invalid message type : %d\n", master->type); + 
ecs__master__free_unpacked(master, NULL); + return false; +} diff --cc tizen/src/hw/maru_board.c index 592dfa45e2,0000000000..3d0b079290 mode 100644,000000..100644 --- a/tizen/src/hw/maru_board.c +++ b/tizen/src/hw/maru_board.c @@@ -1,81 -1,0 +1,81 @@@ +/* + * TIZEN base board + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd All Rights Reserved + * + * Contact: + * YeongKyoon Lee + * SeokYeon Hwang + * SangJin Kim + * KiTae Kim + * JinHyung Jo + * SungMin Ha + * MunKyu Im + * JiHye Kim + * GiWoong Kim + * DongKyun Yun + * DoHyung Hong + * Hyunjun Son + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contributors: + * - S-Core Co., Ltd + * + * x86 board from pc_piix.c... + * add some TIZEN-speciaized device... + */ + +#include "hw/boards.h" +#include "hw/i386/pc.h" + +#include "emulator_common.h" +#include "maru_pm.h" + +/* maru specialized device init */ +static void maru_device_init(void) +{ + // do nothing for now... 
+} + - extern void maru_pc_init_pci(QEMUMachineInitArgs *args); - static void maru_x86_board_init(QEMUMachineInitArgs *args) ++extern void maru_pc_init_pci(MachineState *args); ++static void maru_x86_board_init(MachineState *args) +{ + maru_pc_init_pci(args); + + maru_device_init(); +} + +static QEMUMachine maru_x86_machine = { + PC_DEFAULT_MACHINE_OPTIONS, + .name = "maru-x86-machine", + .alias = "maru-x86-machine", + .desc = "Maru Board (x86)", + .init = maru_x86_board_init, + .hot_add_cpu = pc_hot_add_cpu, + .no_parallel = 1, + .no_floppy = 1, + .no_cdrom = 1, + .no_sdcard = 1, + .default_machine_opts = "firmware=bios-256k.bin", + .default_boot_order = "c", +}; + +static void maru_machine_init(void) +{ + qemu_register_machine(&maru_x86_machine); +} + +machine_init(maru_machine_init); diff --cc tizen/src/hw/pci/maru_brightness.c index a6eb9a21e2,0000000000..b15f272e3e mode 100644,000000..100644 --- a/tizen/src/hw/pci/maru_brightness.c +++ b/tizen/src/hw/pci/maru_brightness.c @@@ -1,227 -1,0 +1,224 @@@ +/* + * Maru brightness device for VGA + * + * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd. All rights reserved. + * + * Contact: + * JinHyung Jo + * YeongKyoon Lee + * DongKyun Yun + * DoHyung Hong + * Hyunjun Son + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ * + * Contributors: + * - S-Core Co., Ltd + * + */ + + +#include "hw/i386/pc.h" +#include "ui/console.h" +#include "hw/pci/pci.h" +#include "hw/maru_device_ids.h" +#include "maru_brightness.h" +#include "skin/maruskin_server.h" +#include "debug_ch.h" + +MULTI_DEBUG_CHANNEL(tizen, brightness); + +#define QEMU_DEV_NAME "maru-brightness" + +#define BRIGHTNESS_MEM_SIZE (4 * 1024) /* 4KB */ +#define BRIGHTNESS_REG_SIZE 256 + +typedef struct BrightnessState { + PCIDevice dev; + MemoryRegion mmio_addr; +} BrightnessState; + +enum { + BRIGHTNESS_LEVEL = 0x00, + BRIGHTNESS_OFF = 0x04, +}; + +uint32_t brightness_level = BRIGHTNESS_MAX; +bool display_off; +pixman_color_t level_color; +pixman_image_t *brightness_image; + +/* level : 1 ~ 100, interval : 1 or 2 */ +uint8_t brightness_tbl[] = {155, /* level 0 : for dimming */ +/* level 01 ~ 10 */ 149, 147, 146, 144, 143, 141, 140, 138, 137, 135, +/* level 11 ~ 20 */ 134, 132, 131, 129, 128, 126, 125, 123, 122, 120, +/* level 21 ~ 30 */ 119, 117, 116, 114, 113, 111, 110, 108, 107, 105, +/* level 31 ~ 40 */ 104, 102, 101, 99, 98, 96, 95, 93, 92, 90, +/* level 41 ~ 50 */ 89, 87, 86, 84, 83, 81, 80, 78, 77, 75, +/* level 51 ~ 60 */ 74, 72, 71, 69, 68, 66, 65, 63, 62, 60, +/* level 61 ~ 70 */ 59, 57, 56, 54, 53, 51, 50, 48, 47, 45, +/* level 71 ~ 80 */ 44, 42, 41, 39, 38, 36, 35, 33, 32, 30, +/* level 81 ~ 90 */ 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, +/* level 91 ~ 99 */ 14, 12, 11, 9, 8, 6, 5, 3, 2, 0}; + +QEMUBH *display_bh; + +static uint64_t brightness_reg_read(void *opaque, + hwaddr addr, + unsigned size) +{ + switch (addr & 0xFF) { + case BRIGHTNESS_LEVEL: + INFO("current brightness level = %lu\n", brightness_level); + return brightness_level; + case BRIGHTNESS_OFF: + INFO("device is turned %s\n", display_off ? 
"off" : "on"); + return display_off; + default: + ERR("wrong brightness register read - addr : %d\n", (int)addr); + break; + } + + return 0; +} + +static void maru_pixman_image_set_alpha(uint8_t value) +{ + if (brightness_image) { + pixman_image_unref(brightness_image); + } + level_color.alpha = value << 8; + brightness_image = pixman_image_create_solid_fill(&level_color); + + graphic_hw_invalidate(NULL); +} + +static void brightness_reg_write(void *opaque, + hwaddr addr, + uint64_t val, + unsigned size) +{ + switch (addr & 0xFF) { + case BRIGHTNESS_LEVEL: + if (brightness_level == val) { + return; + } +#if BRIGHTNESS_MIN > 0 + if (val < BRIGHTNESS_MIN || val > BRIGHTNESS_MAX) { +#else + if (val > BRIGHTNESS_MAX) { +#endif + ERR("input value is out of range(%llu)\n", val); + } else { + INFO("level changes: %lu -> %llu\n", brightness_level, val); + brightness_level = val; + maru_pixman_image_set_alpha(brightness_tbl[brightness_level]); + } + return; + case BRIGHTNESS_OFF: + if (display_off == val) { + return; + } + + INFO("status changes: %s\n", val ? 
"OFF" : "ON"); + + display_off = val; + if (display_off) { + maru_pixman_image_set_alpha(0xFF); /* set black */ + } else { + maru_pixman_image_set_alpha(brightness_tbl[brightness_level]); + } + + /* notify to skin process */ + qemu_bh_schedule(display_bh); + + return; + default: + ERR("wrong brightness register write - addr : %d\n", (int)addr); + break; + } +} + +static const MemoryRegionOps brightness_mmio_ops = { + .read = brightness_reg_read, + .write = brightness_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void brightness_exitfn(PCIDevice *dev) +{ - BrightnessState *s = DO_UPCAST(BrightnessState, dev, dev); - + if (display_bh) { + qemu_bh_delete(display_bh); + } + if (brightness_image) { + pixman_image_unref(brightness_image); + brightness_image = NULL; + } + - memory_region_destroy(&s->mmio_addr); + INFO("finalize maru-brightness device\n"); +} + +static void maru_display_bh(void *opaque) +{ + notify_display_power(!display_off); +} + +static int brightness_initfn(PCIDevice *dev) +{ + BrightnessState *s = DO_UPCAST(BrightnessState, dev, dev); + uint8_t *pci_conf = s->dev.config; + + pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_TIZEN); + pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_VIRTUAL_BRIGHTNESS); + pci_config_set_class(pci_conf, PCI_CLASS_DISPLAY_OTHER); + + memory_region_init_io(&s->mmio_addr, OBJECT(s), &brightness_mmio_ops, s, + "maru-brightness-mmio", BRIGHTNESS_REG_SIZE); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio_addr); + + display_bh = qemu_bh_new(maru_display_bh, s); + brightness_level = BRIGHTNESS_MAX; + level_color.alpha = 0x0000; + level_color.red = 0x0000; + level_color.green = 0x0000; + level_color.blue = 0x0000; + brightness_image = pixman_image_create_solid_fill(&level_color); + INFO("initialize maru-brightness device\n"); + + return 0; +} + +static void brightness_classinit(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->init = brightness_initfn; 
+ k->exit = brightness_exitfn; +} + +static TypeInfo brightness_info = { + .name = QEMU_DEV_NAME, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(BrightnessState), + .class_init = brightness_classinit, +}; + +static void brightness_register_types(void) +{ + type_register_static(&brightness_info); +} + +type_init(brightness_register_types); diff --cc tizen/src/hw/pci/maru_brillcodec_device.c index 15e02877d6,0000000000..e7cb4f3c88 mode 100644,000000..100644 --- a/tizen/src/hw/pci/maru_brillcodec_device.c +++ b/tizen/src/hw/pci/maru_brillcodec_device.c @@@ -1,368 -1,0 +1,365 @@@ +/* + * Virtual Codec Device + * + * Copyright (c) 2013 - 2014 Samsung Electronics Co., Ltd All Rights Reserved + * + * Contact: + * Kitae Kim + * SeokYeon Hwang + * YeongKyoon Lee + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ * + * Contributors: + * - S-Core Co., Ltd + * + */ + +#define SUPPORT_MEMORY_MONOPOLIZING +//#define ENCODE_VIDEO_USE_MEMORY_MONOPOLIZING + +#include "qemu/main-loop.h" +#include "hw/pci/pci.h" + +#include "hw/maru_device_ids.h" +#include "util/osutil.h" +#include "maru_brillcodec.h" +#include "debug_ch.h" + +/* define debug channel */ +MULTI_DEBUG_CHANNEL(qemu, brillcodec); + +#define CODEC_DEVICE_NAME "codec-pci" +#define CODEC_DEVICE_THREAD "codec-workthread" + +#define CODEC_MAJOR_VERSION 3 +#define CODEC_MINOR_VERSION 0 + +#define CODEC_REG_SIZE (256) +#define DEFAULT_WORKER_THREAD_CNT 8 + +enum device_cmd { + DEVICE_CMD_API_INDEX = 0, + DEVICE_CMD_CONTEXT_INDEX, + DEVICE_CMD_DEVICE_MEM_OFFSET, + DEVICE_CMD_GET_THREAD_STATE, + DEVICE_CMD_GET_CTX_FROM_QUEUE, + DEVICE_CMD_GET_DATA_FROM_QUEUE, + DEVICE_CMD_RELEASE_CONTEXT, + DEVICE_CMD_GET_ELEMENT, + DEVICE_CMD_GET_CONTEXT_INDEX, + DEVICE_CMD_GET_DEVICE_INFO, + DEVICE_CMD_GET_PROFILE_STATUS, +}; + +enum thread_state { + CODEC_TASK_START = 0, + CODEC_TASK_END = 0x1f, +}; + +static void brillcodec_threads_create(MaruBrillCodecState *s) +{ + int index; + QemuThread *pthread = NULL; + + TRACE("enter: %s\n", __func__); + + pthread = g_malloc(sizeof(QemuThread) * s->worker_thread_cnt); + if (!pthread) { + ERR("failed to allocate threadpool memory.\n"); + return; + } + + qemu_cond_init(&s->threadpool.cond); + qemu_mutex_init(&s->threadpool.mutex); + + s->is_thread_running = true; + + qemu_mutex_lock(&s->context_mutex); + s->idle_thread_cnt = 0; + qemu_mutex_unlock(&s->context_mutex); + + for (index = 0; index < s->worker_thread_cnt; index++) { + qemu_thread_create(&pthread[index], CODEC_DEVICE_THREAD, + brillcodec_threads, (void *)s, QEMU_THREAD_JOINABLE); + } + + s->threadpool.threads = pthread; + + TRACE("leave: %s\n", __func__); +} + +static void brillcodec_get_cpu_cores(MaruBrillCodecState *s) +{ + s->worker_thread_cnt = get_number_of_processors(); + if (s->worker_thread_cnt < DEFAULT_WORKER_THREAD_CNT) { + 
s->worker_thread_cnt = DEFAULT_WORKER_THREAD_CNT; + } + + TRACE("number of threads: %d\n", s->worker_thread_cnt); +} + +static void brillcodec_bh_callback(void *opaque) +{ + MaruBrillCodecState *s = (MaruBrillCodecState *)opaque; + + TRACE("enter: %s\n", __func__); + + qemu_mutex_lock(&s->context_queue_mutex); + if (!QTAILQ_EMPTY(&codec_wq)) { + qemu_mutex_unlock(&s->context_queue_mutex); + + TRACE("raise irq\n"); + pci_set_irq(&s->dev, 1); + s->irq_raised = 1; + } else { + qemu_mutex_unlock(&s->context_queue_mutex); + TRACE("codec_wq is empty!!\n"); + } + + TRACE("leave: %s\n", __func__); +} + +static uint64_t brillcodec_read(void *opaque, + hwaddr addr, + unsigned size) +{ + MaruBrillCodecState *s = (MaruBrillCodecState *)opaque; + uint64_t ret = 0; + + switch (addr >> 2) { + case DEVICE_CMD_GET_THREAD_STATE: + qemu_mutex_lock(&s->context_queue_mutex); + if (s->irq_raised) { + ret = CODEC_TASK_END; + pci_set_irq(&s->dev, 0); + s->irq_raised = 0; + } + qemu_mutex_unlock(&s->context_queue_mutex); + + TRACE("get thread_state. ret: %d\n", ret); + break; + + case DEVICE_CMD_GET_CTX_FROM_QUEUE: + { + DeviceMemEntry *head = NULL; + + qemu_mutex_lock(&s->context_queue_mutex); + head = QTAILQ_FIRST(&codec_wq); + if (head) { + ret = head->ctx_id; + QTAILQ_REMOVE(&codec_wq, head, node); + entry[ret] = head; + TRACE("get a elem from codec_wq. 0x%x\n", head); + } else { + ret = 0; + } + qemu_mutex_unlock(&s->context_queue_mutex); + + TRACE("get a head from a writequeue. 
head: %x\n", ret); + } + break; + + case DEVICE_CMD_GET_DEVICE_INFO: + ret |= CODEC_MAJOR_VERSION << 8; + ret |= CODEC_MINOR_VERSION; + TRACE("codec version: %d.%d.%d\n", CODEC_MAJOR_VERSION, CODEC_MINOR_VERSION, 0); + + ret |= s->memory_monopolizing << 16; + break; + + case DEVICE_CMD_GET_ELEMENT: + ret = brillcodec_query_list(s); + break; + + case DEVICE_CMD_GET_CONTEXT_INDEX: + ret = brillcodec_get_context_index(s); + TRACE("get context index: %d\n", ret); + break; + + case DEVICE_CMD_GET_PROFILE_STATUS: + ret = s->profile; + TRACE("profile status: %d\n", s->profile); + break; + + default: + ERR("no avaiable command for read. %d\n", addr); + } + + return ret; +} + +static void brillcodec_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + MaruBrillCodecState *s = (MaruBrillCodecState *)opaque; + + switch (addr >> 2) { + case DEVICE_CMD_API_INDEX: + TRACE("set codec_cmd value: %d\n", value); + s->ioparam.api_index = value; + brillcodec_wakeup_threads(s, value); + break; + + case DEVICE_CMD_CONTEXT_INDEX: + TRACE("set context_index value: %d\n", value); + s->ioparam.ctx_index = value; + break; + + case DEVICE_CMD_DEVICE_MEM_OFFSET: + TRACE("set mem_offset value: 0x%x\n", value); + s->ioparam.mem_offset = value; + break; + + case DEVICE_CMD_RELEASE_CONTEXT: + { + int ctx_id = (int32_t)value; + + if (CONTEXT(s, ctx_id)->occupied_thread) { + CONTEXT(s, ctx_id)->requested_close = true; + INFO("make running thread to handle deinit\n"); + } else { + brillcodec_release_context(s, ctx_id); + } + } + break; + + case DEVICE_CMD_GET_DATA_FROM_QUEUE: + brillcodec_pop_writequeue(s, (uint32_t)value); + break; + + default: + ERR("no available command for write. 
%d\n", addr); + } +} + +static const MemoryRegionOps brillcodec_mmio_ops = { + .read = brillcodec_read, + .write = brillcodec_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static int brillcodec_initfn(PCIDevice *dev) +{ + MaruBrillCodecState *s = DO_UPCAST(MaruBrillCodecState, dev, dev); + uint8_t *pci_conf = s->dev.config; + + INFO("device initialization.\n"); + INFO("version: %d.%d.%d\n", CODEC_MAJOR_VERSION, CODEC_MINOR_VERSION, 0); + + pci_config_set_interrupt_pin(pci_conf, 1); + - memory_region_init_ram(&s->vram, OBJECT(s), "maru_brill_codec.vram", CODEC_MEM_SIZE); ++ memory_region_init_ram(&s->vram, OBJECT(s), "maru_brill_codec.vram", CODEC_MEM_SIZE, &error_abort); + s->vaddr = (uint8_t *)memory_region_get_ram_ptr(&s->vram); + + memory_region_init_io(&s->mmio, OBJECT(s), &brillcodec_mmio_ops, s, + "maru_brill_codec.mmio", CODEC_REG_SIZE); + + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio); + + qemu_mutex_init(&s->context_mutex); + qemu_mutex_init(&s->context_queue_mutex); + qemu_mutex_init(&s->ioparam_queue_mutex); + + brillcodec_get_cpu_cores(s); + brillcodec_threads_create(s); + + // register a function to qemu bottom-halves to switch context. 
+ s->codec_bh = qemu_bh_new(brillcodec_bh_callback, s); + + // register plugins + if ((s->hwaccel_plugin = probe_plugin())) { + INFO("%s extension is enabled.\n", s->hwaccel_plugin->name); + } + + // configurations + s->memory_monopolizing = 0; +#ifdef SUPPORT_MEMORY_MONOPOLIZING +# ifdef ENCODE_VIDEO_USE_MEMORY_MONOPOLIZING + s->memory_monopolizing |= 1 << ENCODE_VIDEO; + INFO("API [%d] use memory monopolizing.\n", ENCODE_VIDEO); +# endif +#endif + + if (s->profile) { + INFO("Profile the brillcodec.(%d)\n", s->profile); + } + + return 0; +} + +static void brillcodec_exitfn(PCIDevice *dev) +{ + MaruBrillCodecState *s = DO_UPCAST(MaruBrillCodecState, dev, dev); + INFO("device exit\n"); + + qemu_mutex_destroy(&s->context_mutex); + qemu_mutex_destroy(&s->context_queue_mutex); + qemu_mutex_destroy(&s->ioparam_queue_mutex); + + qemu_bh_delete(s->codec_bh); - - memory_region_destroy(&s->vram); - memory_region_destroy(&s->mmio); +} + +static void brillcodec_reset(DeviceState *d) +{ + MaruBrillCodecState *s = (MaruBrillCodecState *)d; + INFO("device reset\n"); + + s->irq_raised = 0; + + memset(&s->context, 0, sizeof(CodecContext) * CODEC_CONTEXT_MAX); + memset(&s->ioparam, 0, sizeof(CodecParam)); +} + +static Property brillcodec_props[] = { + DEFINE_PROP_UINT8("profile", MaruBrillCodecState, profile, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void brillcodec_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->init = brillcodec_initfn; + k->exit = brillcodec_exitfn; + k->vendor_id = PCI_VENDOR_ID_TIZEN; + k->device_id = PCI_DEVICE_ID_VIRTUAL_BRILL_CODEC; + k->class_id = PCI_CLASS_OTHERS; + dc->reset = brillcodec_reset; + dc->props = brillcodec_props; + dc->desc = "Virtual new codec device for Tizen emulator"; +} + +static TypeInfo codec_device_info = { + .name = CODEC_DEVICE_NAME, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MaruBrillCodecState), + .class_init = 
brillcodec_class_init, +}; + +static void codec_register_types(void) +{ + type_register_static(&codec_device_info); +} + +type_init(codec_register_types) diff --cc tizen/src/hw/pci/maru_camera_common_pci.c index 882d83311f,0000000000..d44df52439 mode 100644,000000..100644 --- a/tizen/src/hw/pci/maru_camera_common_pci.c +++ b/tizen/src/hw/pci/maru_camera_common_pci.c @@@ -1,316 -1,0 +1,314 @@@ +/* + * Common implementation of MARU Virtual Camera device by PCI bus. + * + * Copyright (c) 2011 - 2013 Samsung Electronics Co., Ltd All Rights Reserved + * + * Contact: + * JinHyung Jo + * YeongKyoon Lee + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ * + * Contributors: + * - S-Core Co., Ltd + * + */ + + +#include +#include +#include +#include +#include +#include + +#include "qemu-common.h" +#include "qemu/main-loop.h" +#include "exec/cpu-common.h" + +#include "maru_camera_common.h" +#include "hw/maru_device_ids.h" +#include "debug_ch.h" + +MULTI_DEBUG_CHANNEL(tizen, camera); + +#define MARU_PCI_CAMERA_DEVICE_NAME "maru-camera" + +#define MARUCAM_MEM_SIZE (4 * 1024 * 1024) /* 4MB */ +#define MARUCAM_REG_SIZE (256) /* 64 * 4Byte */ + +/* + * I/O functions + */ +static inline uint32_t +marucam_mmio_read(void *opaque, hwaddr offset) +{ + uint32_t ret = 0; + MaruCamState *state = (MaruCamState *)opaque; + + switch (offset & 0xFF) { + case MARUCAM_CMD_ISR: + qemu_mutex_lock(&state->thread_mutex); + ret = state->isr; + if (ret != 0) { + pci_set_irq(&state->dev, 0); + state->isr = 0; + } + qemu_mutex_unlock(&state->thread_mutex); + break; + case MARUCAM_CMD_G_DATA: + ret = state->param->stack[state->param->top++]; + break; + case MARUCAM_CMD_OPEN: + case MARUCAM_CMD_CLOSE: + case MARUCAM_CMD_START_PREVIEW: + case MARUCAM_CMD_STOP_PREVIEW: + case MARUCAM_CMD_S_PARAM: + case MARUCAM_CMD_G_PARAM: + case MARUCAM_CMD_ENUM_FMT: + case MARUCAM_CMD_TRY_FMT: + case MARUCAM_CMD_S_FMT: + case MARUCAM_CMD_G_FMT: + case MARUCAM_CMD_QCTRL: + case MARUCAM_CMD_S_CTRL: + case MARUCAM_CMD_G_CTRL: + case MARUCAM_CMD_ENUM_FSIZES: + case MARUCAM_CMD_ENUM_FINTV: + ret = state->param->errCode; + state->param->errCode = 0; + break; + default: + ERR("Not supported command: 0x%x\n", offset); + ret = EINVAL; + break; + } + return ret; +} + +static inline void +marucam_mmio_write(void *opaque, hwaddr offset, uint32_t value) +{ + MaruCamState *state = (MaruCamState *)opaque; + + switch (offset & 0xFF) { + case MARUCAM_CMD_OPEN: + marucam_device_open(state); + break; + case MARUCAM_CMD_CLOSE: + marucam_device_close(state); + break; + case MARUCAM_CMD_START_PREVIEW: + marucam_device_start_preview(state); + break; + case MARUCAM_CMD_STOP_PREVIEW: 
+ marucam_device_stop_preview(state); + memset(state->vaddr, 0, MARUCAM_MEM_SIZE); + break; + case MARUCAM_CMD_S_PARAM: + marucam_device_s_param(state); + break; + case MARUCAM_CMD_G_PARAM: + marucam_device_g_param(state); + break; + case MARUCAM_CMD_ENUM_FMT: + marucam_device_enum_fmt(state); + break; + case MARUCAM_CMD_TRY_FMT: + marucam_device_try_fmt(state); + break; + case MARUCAM_CMD_S_FMT: + marucam_device_s_fmt(state); + break; + case MARUCAM_CMD_G_FMT: + marucam_device_g_fmt(state); + break; + case MARUCAM_CMD_QCTRL: + marucam_device_qctrl(state); + break; + case MARUCAM_CMD_S_CTRL: + marucam_device_s_ctrl(state); + break; + case MARUCAM_CMD_G_CTRL: + marucam_device_g_ctrl(state); + break; + case MARUCAM_CMD_ENUM_FSIZES: + marucam_device_enum_fsizes(state); + break; + case MARUCAM_CMD_ENUM_FINTV: + marucam_device_enum_fintv(state); + break; + case MARUCAM_CMD_S_DATA: + state->param->stack[state->param->top++] = value; + break; + case MARUCAM_CMD_DATACLR: + memset(state->param, 0, sizeof(MaruCamParam)); + break; + case MARUCAM_CMD_REQFRAME: + qemu_mutex_lock(&state->thread_mutex); + state->req_frame = value + 1; + qemu_mutex_unlock(&state->thread_mutex); + break; + default: + ERR("Not supported command: 0x%x\n", offset); + break; + } +} + +static const MemoryRegionOps maru_camera_mmio_ops = { + .old_mmio = { + .read = { + marucam_mmio_read, + marucam_mmio_read, + marucam_mmio_read, + }, + .write = { + marucam_mmio_write, + marucam_mmio_write, + marucam_mmio_write, + }, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* + * QEMU bottom half funtion + */ +static void marucam_tx_bh(void *opaque) +{ + MaruCamState *state = (MaruCamState *)opaque; + + qemu_mutex_lock(&state->thread_mutex); + if (state->isr) { + pci_set_irq(&state->dev, 1); + } + qemu_mutex_unlock(&state->thread_mutex); +} + +/* + * Initialization function + */ + +static int marucam_initfn(PCIDevice *dev) +{ + MaruCamState *s = DO_UPCAST(MaruCamState, dev, dev); + uint8_t *pci_conf = 
s->dev.config; + + /* Check available webcam + * If there is not one, you can't use the camera. + */ + if (!marucam_device_check(1)) { + s->initialized = false; + ERR("Failed to check the camera device, " + "You can *not* use the camera\n"); + return 0; + } + + pci_config_set_interrupt_pin(pci_conf, 0x03); + - memory_region_init_ram(&s->vram, OBJECT(s), "marucamera.ram", MARUCAM_MEM_SIZE); ++ memory_region_init_ram(&s->vram, OBJECT(s), "marucamera.ram", MARUCAM_MEM_SIZE, ++ &error_abort); + s->vaddr = memory_region_get_ram_ptr(&s->vram); + memset(s->vaddr, 0, MARUCAM_MEM_SIZE); + + memory_region_init_io(&s->mmio, OBJECT(s), + &maru_camera_mmio_ops, + s, + "maru-camera-mmio", + MARUCAM_REG_SIZE); + + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio); + + /* for worker thread */ + s->param = (MaruCamParam *)g_malloc0(sizeof(MaruCamParam)); + qemu_cond_init(&s->thread_cond); + qemu_mutex_init(&s->thread_mutex); + + marucam_device_init(s); + + s->tx_bh = qemu_bh_new(marucam_tx_bh, s); + s->initialized = true; + INFO("initialize maru-camera device\n"); + + return 0; +} + +/* + * Termination function + */ +static void marucam_exitfn(PCIDevice *pci_dev) +{ + MaruCamState *s = + OBJECT_CHECK(MaruCamState, pci_dev, MARU_PCI_CAMERA_DEVICE_NAME); + + if (s->initialized) { + marucam_device_exit(s); + g_free(s->param); + qemu_cond_destroy(&s->thread_cond); + qemu_mutex_destroy(&s->thread_mutex); - - memory_region_destroy(&s->vram); - memory_region_destroy(&s->mmio); + } + + INFO("finalize maru-camera device\n"); +} + +static void marucam_resetfn(DeviceState *d) +{ + MaruCamState *s = (MaruCamState *)d; + + if (s->initialized) { + marucam_device_close(s); + qemu_mutex_lock(&s->thread_mutex); + s->isr = s->streamon = s->req_frame = s->buf_size = 0; + qemu_mutex_unlock(&s->thread_mutex); + memset(s->vaddr, 0, MARUCAM_MEM_SIZE); + memset(s->param, 0x00, sizeof(MaruCamParam)); + INFO("reset 
maru-camera device\n"); + } +} + +int maru_camera_pci_init(PCIBus *bus) +{ + pci_create_simple(bus, -1, MARU_PCI_CAMERA_DEVICE_NAME); + return 0; +} + +static void maru_camera_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->init = marucam_initfn; + k->exit = marucam_exitfn; + k->vendor_id = PCI_VENDOR_ID_TIZEN; + k->device_id = PCI_DEVICE_ID_VIRTUAL_CAMERA; + k->class_id = PCI_CLASS_OTHERS; + dc->reset = marucam_resetfn; + dc->desc = "MARU Virtual Camera device for Tizen emulator"; +} + +static TypeInfo maru_camera_info = { + .name = MARU_PCI_CAMERA_DEVICE_NAME, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MaruCamState), + .class_init = maru_camera_pci_class_init, +}; + +static void maru_camera_pci_register_types(void) +{ + type_register_static(&maru_camera_info); +} + +type_init(maru_camera_pci_register_types) diff --cc ui/console.c index 1d714e2624,258af5dfff..60d546e051 --- a/ui/console.c +++ b/ui/console.c @@@ -28,13 -28,9 +28,13 @@@ #include "qmp-commands.h" #include "sysemu/char.h" #include "trace.h" + #include "exec/memory.h" +#ifdef SDL_THREAD +extern QemuMutex sdl_mutex; +#endif + #define DEFAULT_BACKSCROLL 512 - #define MAX_CONSOLES 12 #define CONSOLE_CURSOR_PERIOD 500 typedef struct TextAttributes { @@@ -1334,11 -1357,8 +1365,12 @@@ void register_displaychangelistener(Dis } dcl->ops->dpy_gfx_switch(dcl, dummy); } + +#ifdef SDL_THREAD + qemu_mutex_unlock(&sdl_mutex); +#endif } + text_console_update_cursor(NULL); } void update_displaychangelistener(DisplayChangeListener *dcl, diff --cc ui/spice-core.c index b7d869b75a,6467fa4776..eeff388b8a --- a/ui/spice-core.c +++ b/ui/spice-core.c @@@ -35,14 -35,10 +35,14 @@@ #include "qapi/qmp/qjson.h" #include "qemu/notify.h" #include "migration/migration.h" - #include "monitor/monitor.h" #include "hw/hw.h" #include "ui/spice-display.h" + #include "qapi-event.h" +#ifdef CONFIG_MARU +extern int 
get_emul_spice_port(void); +#endif + /* core bits */ static SpiceServer *spice_server; diff --cc util/oslib-posix.c index f7a8325459,16fcec2f37..64f656cb21 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@@ -139,21 -118,14 +142,27 @@@ void *qemu_try_memalign(size_t alignmen return ptr; } + void *qemu_memalign(size_t alignment, size_t size) + { + return qemu_oom_check(qemu_try_memalign(alignment, size)); + } + +#ifdef CONFIG_MARU +void *preallocated_ram_ptr = NULL; +int preallocated_ram_size = -1; +#endif ++ /* alloc shared memory pages */ - void *qemu_anon_ram_alloc(size_t size) + void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment) { +#ifdef CONFIG_MARU + if (size == preallocated_ram_size && preallocated_ram_ptr) { + void *ptr = preallocated_ram_ptr; + preallocated_ram_ptr = NULL; + preallocated_ram_size = -1; + return ptr; + } +#endif size_t align = QEMU_VMALLOC_ALIGN; size_t total = size + align - getpagesize(); void *ptr = mmap(0, total, PROT_READ | PROT_WRITE, diff --cc util/oslib-win32.c index fed8de3896,87cfbe0834..2df5de4bd2 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@@ -170,11 -62,12 +174,17 @@@ void *qemu_try_memalign(size_t alignmen return ptr; } + void *qemu_memalign(size_t alignment, size_t size) + { + return qemu_oom_check(qemu_try_memalign(alignment, size)); + } + +#ifdef CONFIG_MARU +void *preallocated_ram_ptr = NULL; +int preallocated_ram_size = -1; +#endif - void *qemu_anon_ram_alloc(size_t size) ++ + void *qemu_anon_ram_alloc(size_t size, uint64_t *align) { void *ptr; diff --cc vl.c index 44da833e92,eb89d62906..62b711a519 --- a/vl.c +++ b/vl.c @@@ -127,17 -115,10 +126,19 @@@ int qemu_main(int argc, char **argv, ch #include "ui/qemu-spice.h" #include "qapi/string-input-visitor.h" + #include "qapi/opts-visitor.h" #include "qom/object_interfaces.h" + #include "qapi-event.h" +#ifdef CONFIG_MARU +#include "tizen/src/emulator.h" +#include "tizen/src/util/maru_err_table.h" +#include "tizen/src/emul_state.h" +#include 
"tizen/src/display/maru_display.h" +#include "tizen/src/skin/maruskin_operation.h" +#include "tizen/src/ecs/ecs.h" +#endif + #define DEFAULT_RAM_SIZE 128 #define MAX_VIRTIO_CONSOLES 1 @@@ -4105,18 -3862,6 +3991,18 @@@ int main(int argc, char **argv, char ** exit(0); } +#if defined(CONFIG_MARU) - preallocated_ram_ptr = qemu_anon_ram_alloc(ram_size); ++ preallocated_ram_ptr = qemu_anon_ram_alloc(ram_size, NULL); + if (preallocated_ram_ptr) { + preallocated_ram_size = ram_size; + } + + kernel_cmdline = qemu_opt_get(qemu_get_machine_opts(), "append"); + // Returned variable points different address from input variable. + kernel_cmdline = prepare_maru(kernel_cmdline); + qemu_opt_set(qemu_get_machine_opts(), "append", kernel_cmdline); +#endif + /* Open the logfile at this point, if necessary. We can't open the logfile * when encountering either of the logging options (-d or -D) because the * other one may be encountered later on the command line, changing the @@@ -4409,11 -4147,15 +4304,15 @@@ qemu_spice_init(); #endif - if (icount_option && (kvm_enabled() || xen_enabled() || hax_enabled())) { - fprintf(stderr, "-icount is not allowed with kvm or xen\n"); - exit(1); + cpu_ticks_init(); + if (icount_opts) { - if (kvm_enabled() || xen_enabled()) { ++ if (kvm_enabled() || xen_enabled() || hax_enabled()) { + fprintf(stderr, "-icount is not allowed with kvm or xen\n"); + exit(1); + } + configure_icount(icount_opts, &error_abort); + qemu_opts_del(icount_opts); } - configure_icount(icount_option); /* clean up network at qemu process termination */ atexit(&net_cleanup); @@@ -4543,18 -4255,16 +4412,17 @@@ qdev_machine_init(); - QEMUMachineInitArgs args = { .machine = machine, - .ram_size = ram_size, - .boot_order = boot_order, - .kernel_filename = kernel_filename, - .kernel_cmdline = kernel_cmdline, - .initrd_filename = initrd_filename, - .cpu_model = cpu_model }; + current_machine->ram_size = ram_size; + current_machine->maxram_size = maxram_size; + current_machine->ram_slots = 
ram_slots; + current_machine->boot_order = boot_order; + current_machine->cpu_model = cpu_model; - current_machine->init_args = args; - machine->init(¤t_machine->init_args); + machine_class->init(current_machine); + + realtime_init(); + // TODO: Check about it... audio_init(); cpu_synchronize_all_post_init(); @@@ -4575,32 -4281,9 +4443,35 @@@ if (qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, 1) != 0) exit(1); +#ifdef CONFIG_VIGS + // To support legacy VIGS options + if (enable_vigs) { + PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL); + PCIDevice *pci_dev = pci_create(pci_bus, -1, "vigs"); + if (vigs_backend) { + qdev_prop_set_string(&pci_dev->qdev, "backend", vigs_backend); + } else { + qdev_prop_set_string(&pci_dev->qdev, "backend", "gl"); + } + qdev_prop_set_string(&pci_dev->qdev, "wsi", "wsi0"); + qdev_init_nofail(&pci_dev->qdev); + } +#endif +#ifdef CONFIG_YAGL + // To support legacy YaGL options + if (yagl_enabled()) { + PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL); + PCIDevice *pci_dev = pci_create(pci_bus, -1, "yagl"); + if (enable_vigs) { + qdev_prop_set_string(&pci_dev->qdev, "wsi", "wsi0"); + } + qdev_init_nofail(&pci_dev->qdev); + } +#endif + + /* Did we create any drives that we failed to create a device for? */ + drive_check_orphaned(); + net_check_clients(); ds = init_displaystate();